# The block of messages below are only found in this file. If you'd
# like to override them for other languages, simply add the keys
# to the other resource bundles.
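#
# For illustration only - a minimal sketch of how that override works; the
# base bundle name "ApplicationResources" and the German locale are
# assumptions, not taken from this file:
#
#   // ApplicationResources_de.properties would contain just the overrides:
#   //   button.save=speichern
#   ResourceBundle bundle =
#       ResourceBundle.getBundle("ApplicationResources", Locale.GERMAN);
#   bundle.getString("button.save");   // "speichern" (from the _de bundle)
#   bundle.getString("button.cancel"); // "cancel" (falls back to this file)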
webapp.name=gbif:hit
webapp.version=rev432
company.name=BiNHum
company.url=http://wiki.binhum.net/web/
copyright.year=2012-

# Start verified properties

# -- button labels --
button.add=add bioDatasource
button.backup=backup
button.kill=kill
button.schedule=schedule
button.toStatic=switch to Static View
button.cleanLogs=erase logs
button.toDynamic=switch to Dynamic View
button.cancel=cancel
button.delete=delete
button.undelete=un-delete
button.hide=hide
button.unhide=un-hide
button.done=done
button.register=signup
button.save=save
button.search=search
button.upload=upload
button.reset=reset
button.login=login
button.next=next
button.filter=filter
button.writeStats=statistics report
button.writeReport=statistics report
button.generateReport=generate report
button.deleteOrphans=delete orphans
button.loadFile=upload units list
button.fileFormat=one unitID per line
button.writeMissingReport=report on missing associated units

# new actions for associated data
asso.harvestList=Harvest
asso.processList=Process harvested units

# -- Alert messages
alert.switchingToDynamic=Switching to Dynamic View
alert.switchingToStatic=Switching to Static View
backup.help=Copies the BioDatasource directory (and all its contents) to the backup directory. Note that this does not delete the BioDatasource.
delete.help=Flags the BioDatasource as deleted and deletes its directory (and all its contents), plus all related jobs and log messages.

# -- Dropdown options
option.provider=Data provider
option.datasource=Datasource
option.country=Endorsing Country


# -- Login section (decorator)
login.logout=Log out
login.login=Log in
login.unauthorised=You are not authorised to see this page - please log in


# -- Datasources list page --
datasources.explainingText=Overview of all BioDatasources managed locally, divided into two categories: <b>metadata updaters</b> and <b>operators</b>.<br/><i>Metadata updaters</i> gather information about the number of resources behind a given access point and create a new <i>operator</i> for each one. <br/>The <i>operators</i> are then used to manage and perform actions against that individual resource located at the given access point.<br/><br/>To add a new datasource, click the <i>add bioDatasource</i> button in the bottom-right corner. <br/>Fill in all the fields and make sure you are using the correct access point URL, e.g. a BioCASe access point should contain "pywrapper.cgi" and not "dsa_info.cgi".
datasources.title=BioDatasource List
datasources.heading=BioDatasources
biodatasources.explainingText=Overview of all BioDatasources managed locally, divided into two categories: <b>metadata updaters</b> and <b>operators</b>.<br/><i>Metadata updaters</i> gather information about the number of resources behind a given access point and create a new <i>operator</i> for each one. <br/>The <i>operators</i> are then used to manage and perform actions against that individual resource located at the given access point.<br/><br/>To add a new datasource, click the <i>add bioDatasource</i> button in the bottom-right corner. <br/>Fill in all the fields and make sure you are using the correct access point URL, e.g. a BioCASe access point should contain "pywrapper.cgi" and not "dsa_info.cgi".

view.deleted=Deleted BioDatasources cannot be scheduled
# -- Datasources list page end --

# -- Datasource Detail page --
bioDatasourceDetail.title=BioDatasource Detail
bioDatasourceDetail.add.heading=Add BioDatasource
bioDatasourceDetail.edit.heading=Edit BioDatasource
bioDatasourceDetail.message=Configure your BioDatasource
bioDatasourceDetail.add.message=You can create a new BioDatasource using the form below.
bioDatasourceDetail.edit.message=You can edit the BioDatasource using the form below.

bioDatasource.id=Id
bioDatasource.name=BioDatasource Name
bioDatasource.providerName=Provider Name (abbrev)
bioDatasource.providerFullName=Provider Full Name
bioDatasource.providerURL=Provider Website URL
bioDatasource.providerAddress=Provider Address
bioDatasource.url=Accesspoint URL
bioDatasource.harvesterFactory=Factory class
bioDatasource.parametersAsJSON=Parameters
bioDatasource.targetCount=Target Count
bioDatasource.harvestedCount=Count
bioDatasource.country=Country
bioDatasource.lastHarvested=Last Harvested
bioDatasource.uddiKey=UDDI Key

# -- Datasource Edit page end --

# -- Associated Datasource Detail page --
associatedDatasourceDetail.title=Associated BioDatasource Detail
associatedDatasourceDetail.add.heading=Add Associated BioDatasource
associatedDatasourceDetail.edit.heading=Edit Associated BioDatasource
associatedDatasourceDetail.message=Configure your associated BioDatasource
associatedDatasourceDetail.add.message=You can create a new associated BioDatasource using the form below.
associatedDatasourceDetail.edit.message=You can edit the associated BioDatasource using the form below.

associatedDatasource.id=Id
associatedDatasource.name=BioDatasource Name
associatedDatasource.providerName=Provider Name (abbrev)
associatedDatasource.providerFullName=Provider Full Name
associatedDatasource.providerURL=Provider Website URL
associatedDatasource.providerAddress=Provider Address
associatedDatasource.url=Accesspoint URL
associatedDatasource.harvesterFactory=Factory class
associatedDatasource.parametersAsJSON=Parameters
associatedDatasource.targetCount=Target Count
associatedDatasource.harvestedCount=Count
associatedDatasource.country=Country
associatedDatasource.lastHarvested=Last Harvested
associatedDatasource.uddiKey=UDDI Key

toto.id=Id
toto.name=BioDatasource Name
toto.providerName=Provider Name (abbrev)
toto.url=Accesspoint URL
toto.providerFullName=Provider Full Name
toto.providerURL=Provider Website URL
toto.providerAddress=Provider Address
toto.harvesterFactory=Factory class
toto.parametersAsJSON=Parameters
toto.targetCount=Target Count
toto.harvestedCount=Count
toto.country=Country
toto.lastHarvested=Last Harvested
toto.uddiKey=UDDI Key
# -- Datasource Edit page end --


# -- Console page --
console.title=LogEvent List
console.heading=LogEvents
console.explainingText=A view of all log messages being generated by the application, automatically refreshed every few seconds

# buttons
console.pause=pause
# -- Console page end --

#Extra unit harvesting
extra.title=Supplemental units

#quality tests
quality.title=Quality tests control (launch/display/export)

#view data
viewer.title=View data from the provider (after quality improvement when possible)

#Datasource management
manager.title=Datasource management

# -- Registry page begin --
#registry.explainingText=Synchronise with the GBIF Registry. It can be auto-scheduled, and/or filtered by endorsing Node or organisation name. Default synchronisation runs one time, and synchronises all endorsed data resources from all organisations. 
#registry.explainingText-deleteOrphans=Delete orphaned Biodatasources (ie those whose corresponding organisation, resource, or service has been deleted from the Registry).
#registry.title=Registry
#registry.synchronise=Synchronise
# -- registry page end --

# -- report page begin --
report.title=Report
report.explainingText=Generates an indexing report that includes indexing statistics for all BioDatasources. The output file is written to:&nbsp;
report.dailyReport.explainingText=Compile all log events for one or more data providers into an indexing report displayed below. Filter the report by date range, minimum log level, or one or more provider names. By default, the report covers logs of level 'error' going back 24 hours for all data providers.
# -- report page end --

# -- job list page --
jobs.title=Job List
jobs.explainingText=A view of all operations that have been scheduled and are awaiting execution. Please note that the maximum number of operations that can be run in parallel is 500. 
jobs.kill=Job with ID
jobs.or=or
# -- job list page end --

# Multiple places
schedule.toRun=to run

# End verified properties

user.status=Logged in as: 
user.logout=Logout

# -- validator errors --
errors.invalid={0} is invalid.
errors.maxlength={0} cannot be greater than {1} characters.
errors.minlength={0} cannot be less than {1} characters.
errors.range={0} is not in the range {1} through {2}.
errors.required={0} is a required field.
errors.byte={0} must be a byte.
errors.date={0} is not a date.
errors.double={0} must be a double.
errors.float={0} must be a float.
errors.integer={0} must be a number.
errors.long={0} must be a long.
errors.short={0} must be a short.
errors.creditcard={0} is not a valid credit card number.
errors.email={0} is an invalid e-mail address.
errors.phone={0} is an invalid phone number.
errors.zip={0} is an invalid zip code.
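# The {0}, {1}... placeholders above are java.text.MessageFormat arguments,
# filled in by the framework at render time. A minimal sketch (the field name
# "Username" and the limit 50 are made-up values for illustration):
#
#   String pattern = bundle.getString("errors.maxlength");
#   String msg = MessageFormat.format(pattern, "Username", 50);
#   // -> "Username cannot be greater than 50 characters."
#
# Note: under MessageFormat, a literal apostrophe in a message that takes
# arguments must be doubled ('') and '{0}' renders as a literal {0} - hence
# the doubled quotes in some parameterised messages below.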

# -- other errors --
errors.cancel=Operation cancelled.
errors.detail={0}
errors.general=The process did not complete. Details should follow.
errors.token=Request could not be completed. Operation is not in sequence.
errors.none=No error message was found, check your server logs.
errors.password.mismatch=Invalid username and/or password, please try again.
errors.conversion=An error occurred while converting web values to data values.
errors.twofields=The {0} field has to have the same value as the {1} field.
errors.existing.user=This username ({0}) or e-mail address ({1}) already exists.  Please try a different username.

# -- success messages --
user.added=User information for {0} has been added successfully.
user.deleted=User Profile for {0} has been deleted successfully.
user.registered=You have successfully registered for access to this application. 
user.saved=Your profile has been updated successfully.
user.updated.byAdmin=User information for {0} has been successfully updated.
newuser.email.message={0} has created an AppFuse account for you.  Your username and password information is below.

# -- error page messages --
errorPage.title=An error has occurred
errorPage.heading=Yikes!
404.title=Page Not Found
404.message=The page you requested was not found.  You might try returning to the <a href="{0}">Main Menu</a>. While you&#39;re here, how about a pretty picture to cheer you up?
403.title=Access Denied
403.message=Your current role does not allow you to view this page.  Please contact your system administrator if you believe you should have access.  In the meantime, how about a pretty picture to cheer you up?

# -- login --
login.title=Login
login.heading=Login
login.rememberMe=Remember Me
login.signup=Not a member? <a href="{0}">Signup</a> for an account.
login.passwordHint=Forgot your password?  Have your <a href="?" onmouseover="window.status='Have your password hint sent to you.'; return true" onmouseout="window.status=''; return true" title="Have your password hint sent to you." onclick="passwordHint(); return false">password hint e-mailed to you</a>.
login.passwordHint.sent=The password hint for {0} has been sent to {1}.
login.passwordHint.error=The username {0} was not found in our database.

# -- mainMenu --
menu.home=Home
menu.datasources=Datasources
menu.jobs=Jobs
menu.console=Console
#menu.registry=Registry
menu.report=Report

mainMenu.title=Main Menu
mainMenu.heading=Welcome!
mainMenu.message=<p>This is the Alpha 1.0 version of GBIF's Harvesting and Indexing Toolkit (HIT).</p><p>Please note that this version supports the harvesting of DiGIR, BioCASe, and TAPIR providers.</p><p>To begin, please choose from one of the following options:</p>
mainMenu.activeUsers=Current Users

# -- menu/link messages --
menu.admin=Administration
menu.admin.users=View Users

menu.user=Edit Profile

menu.listBioDatasources=View a list of all BioDatasources
menu.addBioDatasource=Add a new BioDatasource

menu.bioDatasources=BioDatasources
menu.bioDatasources.addBiodatasource=Add a BioDatasource
menu.bioDatasources.biodatasources=View BioDatasources
menu.bioDatasources.synchronise=Synchronise with UDDI

# -- form labels --
label.username=Username
label.password=Password

# -- general values --
icon.information=Information
icon.information.img=/images/iconInformation.gif
icon.email=E-Mail
icon.email.img=/images/iconEmail.gif
icon.warning=Warning
icon.warning.img=/images/iconWarning.gif
date.format=MM/dd/yyyy
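# date.format is a java.text.SimpleDateFormat pattern. A sketch of how the
# webapp could parse and format with it (reading the pattern from this bundle
# is the only assumption):
#
#   SimpleDateFormat df = new SimpleDateFormat(bundle.getString("date.format"));
#   Date parsed = df.parse("07/23/2012");  // throws ParseException on bad input
#   String shown = df.format(new Date());  // e.g. "07/23/2012"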

# -- role form --
roleForm.name=Name

# -- user profile page --
userProfile.title=User Settings
userProfile.heading=User Profile
userProfile.message=Please update your information using the form below.
userProfile.admin.message=You can update this user's information using the form below.
userProfile.showMore=View More Information
userProfile.accountSettings=Account Settings
userProfile.assignRoles=Assign Roles
userProfile.cookieLogin=You cannot change passwords when logging in with the <strong>Remember Me</strong> feature.  Please logout and log back in to change passwords.

# -- user form --
user.address.address=Address
user.availableRoles=Available Roles
user.address.city=City
user.address.country=Country
user.email=E-Mail
user.firstName=First Name
user.id=Id
user.lastName=Last Name
user.password=Password
user.confirmPassword=Confirm Password
user.phoneNumber=Phone Number
user.address.postalCode=Zip
user.address.province=State
user.roles=Current Roles
user.username=Username
user.website=Website
user.visitWebsite=visit
user.passwordHint=Password Hint
user.enabled=Enabled
user.accountExpired=Expired
user.accountLocked=Locked
user.credentialsExpired=Password Expired

# -- user list page --
userList.title=User List
userList.heading=Users
userList.nousers=<span>No users found.</span>

# -- user self-registration --
signup.title=Sign Up
signup.heading=New User Registration
signup.message=Please enter your user information in the form below.
signup.email.subject=AppFuse Account Information
signup.email.message=You have successfully registered for access to AppFuse.  Your username and password information is below.

# -- active users page --
activeUsers.title=Active Users
activeUsers.heading=Active Users
activeUsers.message=The following is a list of users that have logged in and their sessions have not expired.
activeUsers.fullName=Full Name

# JSF-only messages, remove if not using JSF
javax.faces.component.UIInput.REQUIRED=This is a required field.
activeUsers.summary={0} User(s) found, displaying {1} user(s), from {2} to {3}. Page {4} / {5}

map.title=Map
map.heading=Map

# -- BioDatasource-START

# -- bioDatasource Form page --
bioDatasource.added=BioDatasource has been added successfully
bioDatasource.notAdded=BioDatasource has not been added
bioDatasource.updated=BioDatasource has been updated successfully
bioDatasource.notUpdated=BioDatasource has not been updated

# -- BioDatasourceAction --
scheduleOperation.error=Operation ''{0}'' could not be scheduled for BioDatasource ''{1}''
scheduleOperation.error.noBiodatasource=Operation ''{0}'' could not be scheduled

# -- delete action --
bioDatasource.delete=BioDatasource has been deleted successfully
bioDatasource.delete.multiple=BioDatasources have been deleted successfully
bioDatasource.error.delete=BioDatasource {0} could not be deleted

# -- undelete action --
bioDatasource.undelete=BioDatasource has been un-deleted successfully
bioDatasource.undelete.multiple=BioDatasources have been un-deleted successfully
bioDatasource.error.undelete=BioDatasource {0} could not be un-deleted

# -- clean action --
bioDatasource.clean=BioDatasource has been backed up successfully
bioDatasource.clean.multiple=All BioDatasources have been backed up successfully
bioDatasource.error.clean=BioDatasource {0} could not be backed up: please check to see if its directory exists

# -- JobAction --
kill.error.job=A problem occurred while killing the Job with ID #{0}
kill.error=An Exception occurred while killing the thread #{0}
kill.error.null=The thread that you tried to kill was null

##-- RegistryAction --
#registryAction.synchroniseWithRegistry=Synchronising with Registry
#registryAction.synchroniseWithRegistry.filter.nodeKey=Synchronising with Registry, filtered by: nodeKey={0}
#registryAction.synchroniseWithRegistry.filter.providerName=Synchronising with Registry, filtered by: organisation name={0}
#registryAction.synchroniseWithRegistry.filter.nodeKeyAndProviderName=Synchronising with Registry, filtered by both: nodeKey={0} and organisation name={1}

# -- Job-START
job.id=Id
job.name=Name
job.description=Description
job.jobGroup=Job Group
job.runningGroup=Running Group
job.jobClassName=Job Class Name
job.dataAsJSON=Data As JSON
job.created=Created
job.nextFireTime=Next Fire Time
job.started=Started
job.instanceId=Instance Id

job.added=Job has been added successfully.
job.updated=Job has been updated successfully.
job.deleted=Job has been deleted successfully.



# -- LogEvent-START
logEvent.id=Id
logEvent.message=Message
logEvent.timestamp=Timestamp
logEvent.groupId=Group Id
logEvent.level=Level
logEvent.instanceId=Instance Id
logEvent.bioDatasource=Bio Datasource
logEvent.user=User
logEvent.infoAsJSON=Info As JSON
logEvent.messageParams=Message Params

logEvent.added=LogEvent has been added successfully.
logEvent.updated=LogEvent has been updated successfully.
logEvent.deleted=LogEvent has been deleted successfully.



# -- messages common across several classes --
error.mappingFileExists=Mapping file resource {0} does not exist
error.mappingFile=A problem occurred while reading over mapping file {0}: {1}


#-- createBioDatasource method messages --
createBioDatasource=Saving new BioDatasource: {0}
createBioDatasource.exists=Updating BioDatasource with name: {0}
updateCount=The target count has been updated to: {0}
defaultCount=The target count has been defaulted to: {0}
setCount=The target count has been set to: {0}

#-- createBioDatasource method error messages --
error.createBioDatasource=An error occurred while creating new BioDatasource: {0}

#-- issueMetadata method messages --
start.issueMetadata=Start metadata update {0}
end.issueMetadata=Finished metadata update {0}

#-- updateCount method messages --
start.updateCount=Start updating count information
end.updateCount=Finished updating count information

start.updateMetadata=Start updating other metadata
end.updateMetadata=Finished updating other metadata

#-- metadataRequest method messages --
start.metadataRequest=Start metadata request
end.metadataRequest=Finished metadata request
start.metadataRequest.prepareDirectory=Start preparing directory for metadata request
end.metadataRequest.prepareDirectory=Finished preparing directory for metadata request

#-- metadataRequest method error messages --
error.metadataRequest.prepareDirectory=Error preparing directory for metadata request: {0}
error.metadataRequest.buildUrl=Metadata request could not be constructed: {0}
error.metadataRequest.writeRequest=Metadata request file could not be written: {0}
error.metadataRequest.writeResponse=Metadata response file could not be written: {0}
error.metadataRequest.parsing=Metadata response could not be parsed: {0}

#-- processMetadata method messages --
start.processMetadata=Start processing resource metadata
end.processMetadata=Finished processing resource metadata

start.processMetadataForContacts=Start processing resource contact metadata
end.processMetadataForContacts=Finished processing resource contact metadata

#-- processMetatdata error messages --
error.processMetadata.mapping=Getting metadata mapping file failed: {0} 
error.processMetadata.fileNotFound=Metadata file could not be found: {0}
error.processMetadata.parsing=Metadata file could not be parsed for element {0}: {1} 

#-- processOtherMetadata error messages --
error.processOtherMetadata.fileNotFound=(Other) Metadata file could not be found: {0}
error.processOtherMetadata.parsing=(Other) Metadata file could not be parsed: {0} 

error.createBWs=An error occurred while opening file(s) for writing: {0}
error.writeHeaders=An error occurred while trying to write header line on file: {0}
error.closeBWs=An error occurred while trying to close file(s): {0}

error.populateElementOfInterestsMapsFromMappingFile=An error occurred loading mapping file {0}: {1}

error.issueMetadata.noName=The resource name (code) was null, therefore a new BioDatasource was NOT created.
error.issueMetadata.invalidSchemaLocation=The schemaLocation ({0}) was not valid, therefore a new BioDatasource was NOT created.

error.gettingCount=The target count for {0} could not be updated

writeOutputFile=Writing to file: {0}

# -- common messages across all protocols --
inventory=Inventory
#processInventoried=Process inventoried units
harvest=Harvest
processHarvested=Process harvested records
harvestAssociatedUnits=Harvest associated units
processAssociatedUnits=Process associated units
harvestSiblingUnits=Harvest sibling units
processSiblingUnits=Process sibling units
harvestExtraUnits=Harvest the list of extra units
processExtraUnits=Process the extra units

# -- common search method messages --
start.search=Start harvest
end.search=Finished harvest

# -- common processInventoried method messages --
unitidRanges.written=UnitID ranges file has been written: {0}  
start.processInventoried=Start process inventoried  
end.processInventoried=Finished process inventoried
start.processInventoried.prepareDirectory=Start preparing directory for process inventoried
end.processInventoried.prepareDirectory=Finished preparing directory for process inventoried
processInventoried.namesPerRange=The maximum size of name ranges has been set to {0}

# -- common processInventoried error method messages --
error.processInventoried.unitidRanges.encoding=UnitID ranges file could not be written because one of the names couldn''t be UTF-8 encoded: {0}
error.processInventoried.unitidRanges=UnitID ranges file could not be written: {0}
error.processInventoried.prepareDirectory=Error preparing directory for process inventoried: {0}
error.processInventoried.outputFile=Error reading inventoried records file: {0}
error.processInventoried.default=Defaulting to name ranges of size = {0}

# -- common processHarvested error method messages --
error.processHarvested.parsing=Harvested XML response file ( {0} ) could not be parsed: {1}
error.processHarvested.ioexception={0} could not be processed: {1}

# -- end messages common across several classes --

# -- DwcArchiveMetadataHandler/Factory --
dwcArchiveMetadata.name=DwC-Archive Metadata Factory (DwC Text Format)
dwcArchiveMetadata.url=URL
dwcArchiveMetadata.outputDirectory=Output directory
dwcArchiveMetadata.metadata=Metadata update

# -- download method messages --
dwcarchivemetadatahandler.download.start=Start download {0}
dwcarchivemetadatahandler.download.remove=Removing previously existing directory: {0}
dwcarchivemetadatahandler.download.singleText=Downloading single text file without meta.xml: {0}
dwcarchivemetadatahandler.download.singleMeta=Downloading single meta.xml file: {0}
dwcarchivemetadatahandler.download.singleArchive=Downloading single archive file: {0}
dwcarchivemetadatahandler.download.decompress=Decompressing DwC archive: {0}
dwcarchivemetadatahandler.download.end=Finished download

# -- general error messages --
error.openArchive=Problem opening archive: {0}

# -- getCoreRecordCount method error messages --
dwcarchivemetadatahandler.error.getCoreRecordCount=An error occurred while trying to get the archive core file''s record count: {0}

# -- issueMetadata method messages --
dwcarchivemetadatahandler.issueMetadata.eml=The eml file could not be found in the downloaded archive. 2nd attempt: trying to download it separately from: {0}

# -- issueMetadata method error messages --
dwcarchivemetadatahandler.error.unsupportedArchive=The specified archive has an unsupported format: {0}
error.writeOutputFile=An error occurred while writing to contact file ({0}): {1}

# -- DigirMetadataHandler/Factory --
digirMetadata.name=DiGIR Metadata Factory (DwC 1.0, 1.4, 1.4GE, & 1.4CE)
digirMetadata.url=URL
digirMetadata.outputDirectory=Output directory
digirMetadata.metadata=Metadata update

#-- issueMetadata method error messages --
digirmetadatahandler.error.issueMetadata.iterator=Line iterator over name resources file could not be created: {0}


#-- getMappingFile method error messages -- 
digirmetadatahandler.error.getMappingFile=Getting mapping file failed: {0} 
digirmetadatahandler.default.getMappingFile=Using default mapping file: {0}
digirmetadatahandler.default.conceptualMappingNotFound=For resource={0}: the schema location {1} was not found in the DiGIR conceptualMapping.properties file - please update this file and try again. Defaulting to schema DwC 1.0
digirmetadatahandler.default.protocolMappingNotFound=The schema location {0} was not found in the DiGIR protocolMapping.properties file - please update this file and try again. Defaulting to protocol DiGIR 1.0

#-- getProtocol method error messages -- 
digirmetadatahandler.error.getProtocol=Getting protocol name failed: {0} 
digirmetadatahandler.default.getProtocol=Using default protocol name: {0}

#-- collectResources method messages --
digirmetadatahandler.start.collectResources=Start collecting resources
digirmetadatahandler.end.collectResources=Finished collecting resources

#-- collectResources method error messages -- 
digirmetadatahandler.error.collectResources.outputFile=Error reading resources file: {0} 

#-- processAllMetadata method messages --
start.processAllMetadata=Start collecting resource metadata (including resource contact metadata)
end.processAllMetadata=Finished collecting resource metadata
digirmetadatahandler.start.processAllMetadata.prepareDirectory=Start preparing directory for writing resource/resource contact metadata output files
digirmetadatahandler.end.processAllMetadata.prepareDirectory=Finished preparing directory for writing resource/resource contact metadata output files

#-- processAllMetadata method error messages -- 
digirmetadatahandler.error.processAllMetadata.prepareDirectory=An error occurred while preparing directory for writing resource/resource contact metadata output files
digirmetadatahandler.error.processAllMetadata.createFiles=An error occurred while trying to create resource/resource contact metadata output files

# -- TapirMetadataHandler/Factory --
tapirMetadata.name=TAPIR Metadata Factory (DwC 1.4 or ABCD 1.2 & 2.06)
tapirMetadata.url=URL
tapirMetadata.outputDirectory=Output directory
tapirMetadata.metadata=Metadata update

#-- updateCount method error messages --
tapirmetadatahandler.error.updateMetadata.metadataRequest=The other metadata for {0} could not be updated
tapirmetadatahandler.error.updateMetadata.dataResourceName=The dataResourceName could not be determined - defaulting to {0} for dataResourceName and dataResourceDisplayName

#-- getCount method messages --
tapirmetadatahandler.start.getCount=Start request for count information
tapirmetadatahandler.end.getCount=Finished request for count information
tapirmetadatahandler.getCount.execute=Executing search request (for getting count information)

#-- getCount method error messages --
tapirmetadatahandler.error.getCount.fileNotFound=Search file could not be found: {0}
tapirmetadatahandler.error.getCount.parsing=Search file could not be parsed: {0} 

#-- getCapabilities method messages
tapirmetadatahandler.start.getCapabilities=Start capabilities request
tapirmetadatahandler.end.getCapabilities=Finished capabilities request
tapirmetadatahandler.start.getCapabilities.prepareDirectory=Start preparing directory for capabilities request
tapirmetadatahandler.end.getCapabilities.prepareDirectory=Finished preparing directory for capabilities request
tapirmetadatahandler.getCapabilities.execute=Executing capabilities request

#-- getCapabilities method error messages
tapirmetadatahandler.error.getCapabilities.prepareDirectory=Error preparing directory for capabilities request: {0}
tapirmetadatahandler.error.getCapabilities.buildUrl=Capabilities request could not be constructed: {0}
tapirmetadatahandler.error.getCapabilities.writeRequest=Capabilities request file could not be written: {0}
tapirmetadatahandler.error.getCapabilities.writeResponse=Capabilities response file could not be written: {0}

#-- getNamespace method messages
tapirmetadatahandler.start.getNamespace=Start getting namespace request
tapirmetadatahandler.end.getNamespace=Finished getting namespace request
tapirmetadatahandler.getNamespace.chooseNamespace=The namespace retrieved is: {0}

#-- getNamespace method error messages
tapirmetadatahandler.error.getNamespace.fileNotFound=Capabilities file could not be found: {0}
tapirmetadatahandler.error.getNamespace.parsing=Capabilities file could not be parsed: {0}
tapirmetadatahandler.default.getNamespace.chooseNamespace=Using default content namespace: {0} 

#-- getDatasetTitles method messages
tapirmetadatahandler.start.getDatasetTitles=Start getting dataset titles
tapirmetadatahandler.end.getDatasetTitles=Finished getting dataset titles

#-- getDatasetTitles method error messages
tapirmetadatahandler.error.getDatasetTitles.fileNotFound=Inventory file could not be found: {0}
tapirmetadatahandler.error.getDatasetTitles.parsing=Inventory file could not be parsed: {0}

#-- getMappingFile method error messages
tapirmetadatahandler.error.getMappingFile=Error retrieving mapping file: {0}
tapirmetadatahandler.default.getMappingFile=Using default mapping file: {0} 
tapirmetadatahandler.default.conceptualMappingNotFound=None of the namespace(s) {0} were found in the TAPIR conceptualMapping.properties file - please update this file and try again. Defaulting to namespace http://rs.tdwg.org/dwc/dwcore/
digirmetadatahandler.default.outputModelMappingNotFound=No output model corresponding to namespace {0} was found in the TAPIR outputModelMapping.properties file - please update this file and try again. Defaulting to outputModel http://rs.tdwg.org/tapir/cs/dwc/1.4/model/dw_core_geo_cur.xml


#-- getOutputModel method error messages
tapirmetadatahandler.error.getOutputModel=Error retrieving outputModel mapping file: {0}
tapirmetadatahandler.default.getOutputModel=Using default outputModel: {0} 

#-- getDatasetTitlePath method error messages
tapirmetadatahandler.error.getDatasetTitlePath=Error retrieving dataset title path: {0}
tapirmetadatahandler.default.getDatasetTitlePath=Using default dataset title path: {0} 

#-- retrieveTitleSupportInformation method messages
tapirmetadatahandler.start.retrieveTitleSupportInformation=Start getting information about whether dataset title is supported
tapirmetadatahandler.end.retrieveTitleSupportInformation=Finished getting information about whether dataset title is supported

#-- retrieveTitleSupportInformation method error messages
tapirmetadatahandler.error.retrieveTitleSupportInformation.fileNotFound=Capabilities file could not be found: {0}
tapirmetadatahandler.error.retrieveTitleSupportInformation.parsing=Capabilities file could not be parsed: {0}
tapirmetadatahandler.default.retrieveTitleSupportInformation=Defaulting to dataset-title NOT being searchable

#-- getInventory method messages
tapirmetadatahandler.start.getInventory=Start inventory request
tapirmetadatahandler.end.getInventory=Finished inventory request
tapirmetadatahandler.start.getInventory.prepareDirectory=Start preparing directory for inventory request
tapirmetadatahandler.end.getInventory.prepareDirectory=Finished preparing directory for inventory request
tapirmetadatahandler.getInventory.execute=Executing inventory request

#-- getInventory method error messages
tapirmetadatahandler.error.getInventory.prepareDirectory=Error preparing directory for inventory request: {0}
tapirmetadatahandler.error.getInventory.buildUrl=Inventory request could not be constructed: {0}
tapirmetadatahandler.error.getInventory.writeRequest=Inventory request file could not be written: {0}
tapirmetadatahandler.error.getInventory.writeResponse=Inventory response file could not be written: {0}

#-- getSearch method messages
tapirmetadatahandler.start.getSearch=Start search request (for count information)
tapirmetadatahandler.end.getSearch=Finished search request (for count information)
tapirmetadatahandler.start.getSearch.prepareDirectory=Start preparing directory for search request
tapirmetadatahandler.end.getSearch.prepareDirectory=Finished preparing directory for search request
tapirmetadatahandler.getSearch.execute=Executing search request

#-- getSearch method error messages
tapirmetadatahandler.error.getSearch.prepareDirectory=Error preparing directory for getting count: {0}
tapirmetadatahandler.error.getSearch.buildUrl=Search request could not be constructed: {0}
tapirmetadatahandler.error.getSearch.writeRequest=Search request file could not be written: {0}
tapirmetadatahandler.error.getSearch.writeResponse=Search response file could not be written: {0}

#-- getSettings method messages--
tapirmetadatahandler.start.getSettings=Start collecting settings information
tapirmetadatahandler.end.getSettings=Finished collecting settings information

#-- getSettings method error messages--
tapirmetadatahandler.error.getSettings.fileNotFound=Capabilities file could not be found: {0}
tapirmetadatahandler.error.getSettings.parsing=Capabilities file could not be parsed: {0}

#-- processMetadataForContacts method messages --
tapirmetadatahandler.start.processMetadataForContacts.prepareDirectory=Start preparing directory for writing resource/resource contact metadata output files
tapirmetadatahandler.end.processMetadataForContacts.prepareDirectory=Finished preparing directory for writing resource/resource contact metadata output files

#-- processMetadataForContacts method error messages -- 
tapirmetadatahandler.error.processMetadataForContacts.prepareDirectory=An error occurred while preparing directory for writing resource contact metadata output files
tapirmetadatahandler.error.processMetadataForContacts.createFiles=An error occurred while trying to create resource contact metadata output files

#-- parseNameFromUrl method error messages --
error.resourceName=There was an error parsing the resource code from the url {0} (code=url''s filename). Defaulting to: {1}

# -- BiocaseMetadataHandler/Factory --
biocaseMetadata.name=BioCASe Metadata Factory (ABCD 1.2 or 2.06)
biocaseMetadata.url=URL
biocaseMetadata.outputDirectory=Output directory
biocaseMetadata.metadata=Metadata update

#-- issueMetadata method error messages -- 
biocasemetadatahandler.error.issueMetadata.parsing=Scan file could not be parsed: {0} 

#-- getCapabilities method messages -- 
biocasemetadatahandler.start.getCapabilities=Start capabilities request
biocasemetadatahandler.end.getCapabilities=Finished capabilities request
biocasemetadatahandler.start.getCapabilities.prepareDirectory=Start preparing directory for capabilities
biocasemetadatahandler.end.getCapabilities.prepareDirectory=Finished preparing directory for capabilities
biocasemetadatahandler.getCapabilities.execute=Executing capabilities request

#-- getCapabilities method error messages -- 
biocasemetadatahandler.error.getCapabilities.prepareDirectory=Error preparing directory for capabilities: {0}
biocasemetadatahandler.error.getCapabilities.buildUrl=Capabilities request could not be constructed: {0}
biocasemetadatahandler.error.getCapabilities.writeRequest=Capabilities request file could not be written: {0}
biocasemetadatahandler.error.getCapabilities.writeResponse=Capabilities response file could not be written: {0}

#-- getNamespace method messages -- 
biocasemetadatahandler.start.getNamespace=Start retrieval of namespace
biocasemetadatahandler.end.getNamespace=Finished retrieval of namespace
biocasemetadatahandler.getNamespace.chooseNamespace=The namespace retrieved is: {0}

#-- getNamespace method error messages -- 
biocasemetadatahandler.error.getNamespace.fileNotFound=Capabilities file could not be found: {0}
biocasemetadatahandler.error.getNamespace.parsing=Capabilities file could not be parsed: {0}
biocasemetadatahandler.default.getNamespace=Using default content namespace: {0} 

#-- getDatasetTitlePath method error messages --
biocasemetadatahandler.error.getDatasetTitlePath=Getting path to dataset title concept failed: {0} 
biocasemetadatahandler.default.getDatasetTitlePath=Using default dataset title path: {0} 

#-- getMappingFile method error messages -- 
biocasemetadatahandler.error.getMappingFile=Getting mapping file failed: {0} 
biocasemetadatahandler.default.getMappingFile=Using default mapping file: {0} 
biocasemetadatahandler.default.conceptualMappingNotFound=The namespace {0} was not found in the BioCASe conceptualMapping.properties file - please update this file and try again. Defaulting to namespace http://www.tdwg.org/schemas/abcd/2.06

#-- retrieveTitleSupportInformation method messages -- 
biocasemetadatahandler.start.retrieveTitleSupportInformation=Start gathering information about whether dataset-title is a searchable concept
biocasemetadatahandler.end.retrieveTitleSupportInformation=Finished gathering information about whether dataset-title is a searchable concept

#-- retrieveTitleSupportInformation error method messages -- 
biocasemetadatahandler.error.retrieveTitleSupportInformation.fileNotFound=Capabilities file could not be found: {0}
biocasemetadatahandler.error.retrieveTitleSupportInformation.parsing=Capabilities file could not be parsed: {0}
biocasemetadatahandler.default.retrieveTitleSupportInformation=Defaulting to dataset-title NOT being searchable

biocasemetadatahandler.error.datasetTitle=The dataset title is not searchable; if this datasource is composed of several datasets, it will not be possible to harvest them individually.

#-- getScan method messages -- 
biocasemetadatahandler.start.getScan=Start scan request
biocasemetadatahandler.end.getScan=Finished scan request {0}
biocasemetadatahandler.start.getScan.prepareDirectory=Start preparing directory for scan 
biocasemetadatahandler.end.getScan.prepareDirectory=Finished preparing directory for scan
biocasemetadatahandler.getScan.execute=Executing scan request

#-- getScan method error messages -- 
biocasemetadatahandler.error.getScan.prepareDirectory=Error preparing directory for scan: {0}
biocasemetadatahandler.error.getScan.buildUrl=Scan request could not be constructed: {0}
biocasemetadatahandler.error.getScan.writeRequest=Scan request file could not be written: {0}
biocasemetadatahandler.error.getScan.writeResponse=Scan response file could not be written: {0}

#-- getSearch method messages
biocasemetadatahandler.start.getSearch=Start search request (for count information)
biocasemetadatahandler.end.getSearch=Finished search request (for count information)
biocasemetadatahandler.start.getSearch.prepareDirectory=Start preparing directory for search request
biocasemetadatahandler.end.getSearch.prepareDirectory=Finished preparing directory for search request
biocasemetadatahandler.getSearch.execute=Executing search request

#-- getSearch method error messages
biocasemetadatahandler.error.getSearch.prepareDirectory=Error preparing directory for search request: {0}
biocasemetadatahandler.error.getSearch.buildUrl=Search request could not be constructed: {0}
biocasemetadatahandler.error.getSearch.writeRequest=Search request file could not be written: {0}
biocasemetadatahandler.error.getSearch.writeResponse=Search response file could not be written: {0}

#-- getCount method messages --
biocasemetadatahandler.start.getCount=Start request for count information
biocasemetadatahandler.end.getCount=Finished request for count information
biocasemetadatahandler.getCount.execute=Executing search request (for getting count information)

#-- getCount method error messages --
biocasemetadatahandler.error.getCount.fileNotFound=Search file could not be found: {0}
biocasemetadatahandler.error.getCount.parsing=Search file could not be parsed: {0} 

#-- metadataRequest method messages --
biocasemetadatahandler.start.metadataRequest=Start empty search request to gather name (+other metadata)
biocasemetadatahandler.end.metadataRequest=Finished empty search request to gather name (+other metadata)
biocasemetadatahandler.start.metadataRequest.prepareDirectory=Start preparing directory for empty search request 
biocasemetadatahandler.end.metadataRequest.prepareDirectory=Finished preparing directory for empty search request 

#-- metadataRequest method error messages --
biocasemetadatahandler.error.metadataRequest.prepareDirectory=Error preparing directory for empty search request: {0}
biocasemetadatahandler.error.metadataRequest.buildUrl=Empty search request could not be constructed: {0}
biocasemetadatahandler.error.metadataRequest.writeRequest=Empty search request file could not be written: {0}
biocasemetadatahandler.metadataRequest.execute=Executing empty search request (for getting count information)
biocasemetadatahandler.error.metadataRequest.writeResponse=Empty search response file could not be written: {0}

#-- updateOtherMetadata method error messages --
biocasemetadatahandler.error.updateOtherMetadata.dataResourceDisplayName=The dataResourceDisplayName could not be determined
biocasemetadatahandler.error.updateOtherMetadata.dataResourceName=The dataResourceName could not be determined

conceptPath.error=Error retrieving the concept path (used to construct the filter for the request that retrieves the dataset title), defaulting to: {0}

# GBIFRegistrySynchronise
gbifRegistrySynchronise.start=Start synchronisation with registry
gbifRegistrySynchronise.end=Finished synchronisation with registry

gbifRegistrySynchronise.createBioDatasource.exists=BioDatasource exists for name: {0} and uuid: {1} - information updated.
gbifRegistrySynchronise.error.createBioDatasource.exists=BioDatasource for name: {0} and uuid: {1} - information could NOT be updated because no harvesterFactory could be set.
gbifRegistrySynchronise.error.determineHarvesterFactory=No harvester factory exists for service type (protocol) {0}
gbifRegistrySynchronise.skip=Provider {0} will be skipped, as it has not been endorsed yet
gbifRegistrySynchronise.stopped=Synchronisation with the registry has been manually stopped

# WorkerPool messages
workerPool.error.gbifLogMessage.create=Problem creating GBIF ''Harvesting'' Log Message file: {0}
workerPool.error.gbifLogMessage=Couldn''t open GBIF ''Harvesting'' Log Message file: {0}
workerPool.error.gbifLogMessage.close=Could not close buffered writer on GBIF ''Harvesting'' Log Message file: {0}
workerPool.error.newCount=Attempt to set new counts failed: {0}
workerPool.newMaxHarvestedCount=New maxHarvestedCount has been set 

# ReportAction messages
reportAction.hit=Opened connection to HIT database
reportAction.portal=Opened connection to Portal database
reportAction.clean=Previous indexing report file has been deleted
reportAction.create=Finished creating indexing report file: {0}
reportAction.start=Starting generation of indexing report

# ReportAction error messages
reportAction.error.sql=A problem occurred with the connection to the database(s) while trying to generate the indexing report: {0}
reportAction.error=A problem occurred while trying to generate the indexing report: {0}

##################################################
# Harvester-digir
##################################################
# -- general messages --
digirharvester.name=DiGIR (DwC 1.2)
digirharvester.url=URL
digirharvester.outputDirectory=Output directory
digirharvestr.createDirectory=Creating new directory: {0}

# -- inventory method messages --
digirharvester.start.inventory=Start inventory
digirharvester.end.inventory=Finished inventory
digirharvester.start.inventory.prepareDirectory=Start preparing directory for inventory
digirharvester.end.inventory.prepareDirectory=Finished preparing directory for inventory
digirharvester.inventory.execute=Executing inventory request
digirharvester.inventory.paging=Not all inventory records could be returned: firing new request using lower name set to: {0}
digirharvester.inventory.recordsPerResponse=Inventory response size set to {0} records.

# -- inventory method error messages --
digirharvester.error.inventory.prepareDirectory=Error preparing directory for inventory: {0}
digirharvester.error.inventory.buildUrl=Inventory request could not be constructed: {0}
digirharvester.error.inventory.writeRequest=Inventory request file could not be written: {0}
digirharvester.error.inventory.execute=Inventory request could not be executed: {0}
digirharvester.error.inventory.writeResponse=Inventory response file could not be written: {0} 
digirharvester.error.inventory.fileNotFound=Inventory response file could not be found: {0} 
digirharvester.error.inventory.parsing=Inventory response file could not be parsed: {0} 
digirharvester.error.inventory.closeFis=File input stream of inventory file could not be closed: {0}
digirharvester.error.inventory.nullName=Name needed for next inventory request was null: {0}
digirharvester.error.inventory.mapping=Problem loading request namespace mapping file: {0}
digirharvester.inventory.incrementor=Requesting next {0} records, starting at: {1}
digirharvester.error.inventory.default=Inventory response size defaulting to {0} records.
digirharvester.error.inventory.recordsPerResponse=Error interpreting parameter recordsPerResponse {0} taken from Metadata response: Integer value expected.

# -- processInventoried error method messages --
digirharvester.error.processInventoried.namesPerRange=Error converting maxSearchResponseRecords ( {0} ), please check that it has been set properly by the provider
digirharvester.error.processInventoried.minNameLength=Error converting minQueryTermLength ( {0} ), please check that it has been set properly by the provider

# -- harvest method messages --
digirharvester.start.search.prepareDirectory=Start preparing directory for search
digirharvester.end.search.prepareDirectory=Finished preparing directory for search
digirharvester.start.search.range=Start harvesting range [ {0} - {1} ]
digirharvester.success.search.range=Success harvesting range [ {0} - {1} ]
digirharvester.search.execute=Executing search request
digirharvester.search.requestNamespace=The request content namespace that will be used is: {0}
digirharvester.search.maxResponseSize=The maximum number of search records returned in a single response has been set to {0}
digirharvester.search.fractioned=RETRY harvesting range [ {0} - {1} ] for next {2} records starting at: {3}

# -- harvest method error messages --
digirharvester.error.search.prepareDirectory=Error preparing directory for search: {0}
digirharvester.error.search.range=Error harvesting range [ {0} - {1} ]
digirharvester.error.search.iterator=Line iterator over name ranges file could not be created: {0}
digirharvester.error.search.append=Appending failed name ranges to pending name ranges failed: {0} 
digirharvester.error.search.writeFailed=Writing failed name range to failed name ranges file failed: {0}
digirharvester.error.search.skipRange=Skipping range: [ {0} - {1} ]
digirharvester.error.search.overwrite=Name ranges file could not be overwritten with failed name ranges file: {0}
digirharvester.error.search.reverse=Name ranges file could not be reverse engineered: {0}
digirharvester.error.search.buildUrl=Search request could not be constructed for range [ {0} - {1} ]: {2}
digirharvester.error.search.writeRequest=Search request file could not be written for range [ {0} - {1} ]: {2}
digirharvester.error.search.execute=Search request could not be executed for range [ {0} - {1} ] : {2}
digirharvester.error.search.writeResponse=Search response file could not be written for range [ {0} - {1} ]: {2}
digirharvester.error.search.writePending=Pending range file could not be written: {0}
digirharvester.error.search.fileNotFound=Search response file could not be found for range [ {0} - {1} ]: {2}
digirharvester.error.search.parsing=Search response file could not be parsed for range [ {0} - {1} ]: {2}
digirharvester.error.search.closeFis=File input stream of response file could not be closed for range [ {0} - {1} ]: {2}
digirharvester.error.search.mapping=Problem loading request namespace mapping file: {0}
digirharvester.error.search.maxResponseSize=Error reading maxResponseSize ( {0} ), please check that it has been set properly by the provider
digirharvester.error.search.maxResponseSize.default=Defaulting maximum number of search records returned in a single response to {0}
digirharvester.error.search.fractioned=The RETRY FAILED while harvesting range [ {0} - {1} ] for next {2} records starting at: {3}: {4}

# -- processHarvested method messages --
digirharvester.processHarvested=Process harvested records
digirharvester.harvestSiblingUnits=Harvest sibling units
digirharvester.processSiblingUnits=Process sibling units
digirharvester.start.processHarvested.prepareDirectory=Start preparing directory for process harvested
digirharvester.end.processHarvested.prepareDirectory=Finished preparing directory for process harvested
digirharvester.start.processHarvested=Start process harvested
digirharvester.end.processHarvested=Finished process harvested
digirharvester.processHarvested.write=Header line of harvested tab file has been written successfully


# -- processHarvested method error messages --
digirharvester.error.processHarvested=Harvested records could not be processed: {0}
digirharvester.error.processHarvested.createFis=File writer to harvested file could not be created
digirharvester.error.processHarvested.writeError=Header line of harvested tab file could not be written
digirharvester.error.processHarvested.closeFis=File input stream of harvested tab file could not be closed
digirharvester.error.processHarvested.prepareDirectory=Error preparing directory for process harvested: {0}
digirharvester.error.processHarvested.mapping=Problem loading index mapping file: {0}
digirharvester.error.processHarvested.outputFile=Error reading harvested records file: {0}

digirharvester.error.closeBW=Could not close buffered writer for file {0}: {1}
digirharvester.error.openBW=Could not open buffered writer for file {0}: {1}

# -- helper method messages --
digirharvester.error.mappingFileExists=Mapping file resource {0} does not exist

##################################################
# Harvester-biocase
##################################################
biocaseharvester.name=BioCASe (ABCD 2.06)
biocaseharvester.url=URL
biocaseharvester.namesPerRange=Names per range for harvesting
biocaseharvester.outputDirectory=Output directory

# -- inventory method messages --
biocaseharvester.inventory=Inventory
biocaseharvester.start.inventory=<<< Starting inventory for dataset {0}
biocaseharvester.end.inventory=<b>>>> Inventory finished</b>
biocaseharvester.start.inventory.prepareDirectory=Start preparing directory for inventory
biocaseharvester.end.inventory.prepareDirectory=Finished preparing directory for inventory
biocaseharvester.inventory.execute=Executing inventory request
biocaseharvester.inventory.paging=Not all inventory records could be returned: firing new request using start index set to: {0}

# -- inventory method error messages --
biocaseharvester.error.inventory.prepareDirectory=Error preparing directory for inventory: {0}
biocaseharvester.error.inventory.buildUrl=Inventory request could not be constructed: {0}
biocaseharvester.error.inventory.writeRequest=Inventory request file could not be written: {0}
biocaseharvester.error.inventory.writeResponse=Inventory response file could not be written: {0} 
biocaseharvester.error.inventory.fileNotFound=Inventory response file could not be found: {0} 
biocaseharvester.error.inventory.parsing=Inventory response file could not be parsed: {0} 
biocaseharvester.error.inventory.closeFis=File input stream of inventory file could not be closed: {0}
biocaseharvester.error.inventory.nullName=Name needed for next inventory request was null: {0}
biocaseharvester.error.inventory.mapping=Problem loading request namespace mapping file: {0}

# -- harvest method messages --
biocaseharvester.start.search=<<< Starting harvest of BioCASe web service
biocaseharvester.end.search=<b>>>> BioCASe harvest finished</b>
biocaseharvester.start.search.prepareDirectory=Start preparing directory for search
biocaseharvester.end.search.prepareDirectory=Finished preparing directory for search
biocaseharvester.start.search.range=Harvesting range {0}
biocaseharvester.success.search.range=Success harvesting range [ {0} - {1} ]
biocaseharvester.search.execute=Executing search request
biocaseharvester.search.requestNamespace=The request content namespace that will be used is: {0}
biocaseharvester.search.paging=Not all search records could be returned: firing new request using start index set to: {0}
biocaseharvester.search.getids=Getting list of units to harvest

# -- harvest method error messages --
biocaseharvester.error.search.prepareDirectory=Error preparing directory for search: {0}
biocaseharvester.error.search.range=Error harvesting range [ {0} - {1} ]
biocaseharvester.error.search.iterator=Line iterator over name ranges file could not be created: {0}
biocaseharvester.error.search.append=Appending failed name ranges to pending name ranges failed: {0} 
biocaseharvester.error.search.writeFailed=Writing failed name range to failed name ranges file failed: {0}
biocaseharvester.error.search.skipRange=Skipping range: [ {0} - {1} ]
biocaseharvester.error.search.overwrite=Name ranges file could not be overwritten with failed name ranges file: {0}
biocaseharvester.error.search.reverse=Name ranges file could not be reverse engineered: {0}
biocaseharvester.error.search.buildUrl=Search request could not be constructed for range [ {0} - {1} ]: {2}
biocaseharvester.error.search.writeRequest=Search request file could not be written for range [ {0} - {1} ]: {2}
biocaseharvester.error.search.writeResponse=Search response file could not be written for range [ {0} - {1} ]: {2}
biocaseharvester.error.search.writePending=Pending range file could not be written: {0}
biocaseharvester.error.search.fileNotFound=Search response file could not be found for range [ {0} - {1} ]: {2}
biocaseharvester.error.search.parsing.range=Search response file could not be parsed for range [ {0} - {1} ]: {2}
biocaseharvester.error.search.closeFis=File input stream of response file could not be closed for range [ {0} - {1} ]: {2}
biocaseharvester.error.search.mapping=Problem loading request namespace mapping file: {0}

# -- processHarvested method messages --
biocaseharvester.start.processHarvested=<<< Starting XML processing
biocaseharvester.harvestSiblingUnits=Harvest sibling units
biocaseharvester.processSiblingUnits=Process sibling units
biocaseharvester.end.processHarvested=<b>>>> XML processing finished</b>
biocaseharvester.start.processHarvested.prepareDirectory=Start preparing directory for process harvested
biocaseharvester.end.processHarvested.prepareDirectory=Finished preparing directory for process harvested
biocaseharvester.processHarvested.write=Header line of harvested tab file has been written successfully
biocaseharvester.processHarvested.alert=Processing harvested records in directory: {0}


# -- processHarvested method error messages --
biocaseharvester.error.processHarvested=Harvested records could not be processed: {0}
biocaseharvester.error.processHarvested.createFis=File writers to harvested files could not be created
biocaseharvester.error.processHarvested.writeError=Header line of harvested tab file could not be written
biocaseharvester.error.processHarvested.closeFis=File input stream of harvested tab file could not be closed
biocaseharvester.error.processHarvested.prepareDirectory=Error preparing directory for process harvested: {0}
biocaseharvester.error.processHarvested.mapping=Problem reading index mapping file: {0}
biocaseharvester.error.processHarvested.createFiles=Not all harvested record output files could be created
biocaseharvester.error.processHarvested.writeHeaders=Not all column header lines could be written to the record output files
biocaseharvester.error.processHarvested.setXPath=Problem setting XPaths
biocaseharvester.error.processHarvested.outputFile=Error reading harvested records file: {0}

biocaseharvester.error.closeBW=Could not close buffered writer for file {0}: {1}
biocaseharvester.error.openBW=Could not open buffered writer for file {0}: {1}

# -- helper method messages --
biocaseharvester.error.mappingFileExists=Mapping file resource {0} does not exist
biocaseharvester.error.mappingFile=A problem occurred while reading over mapping file {0}: {1}
biocaseharvester.error.populateElementOfInterestsMapsFromMappingFile=A problem occurred while analysing elements of interest from mapping file {0}: {1}

##################################################
# Harvester-tapir
##################################################
# -- general messages --
tapirharvester.name=TAPIR (DwC 1.4 with geo + cur extensions)
tapirharvester.url=URL
tapirharvester.outputDirectory=Output directory

# -- inventory method messages --
tapirharvester.inventory=Inventory
tapirharvester.start.inventory=Start inventory
tapirharvester.end.inventory=Finished inventory
tapirharvester.start.inventory.prepareDirectory=Start preparing directory for inventory
tapirharvester.end.inventory.prepareDirectory=Finished preparing directory for inventory
tapirharvester.inventory.paging=Not all inventory records could be returned: firing a new request with the lower name set to: {0}

# -- inventory method error messages --
tapirharvester.error.inventory.prepareDirectory=Error preparing directory for inventory: {0}
tapirharvester.error.inventory.paging=Paging inventory request(s) failed: {0} 

# -- processInventoried method error messages --
tapirharvester.error.processInventoried.namesPerRange=Error converting maxResponseSize ({0}); please check that it has been set properly by the provider
tapirharvester.error.processInventoried.minNameLength=Error converting minQueryTermLength ({0}); please check that it has been set properly by the provider
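
# maxResponseSize and minQueryTermLength are reported by the provider as
# strings, and the two errors above fire when they cannot be converted to
# integers. A hedged sketch of that guard (the variable names and the fallback
# constant are illustrative assumptions):
#
#   int namesPerRange;
#   try {
#       namesPerRange = Integer.parseInt(maxResponseSize.trim());
#   } catch (NumberFormatException e) {
#       // corresponds to tapirharvester.error.processInventoried.namesPerRange
#       namesPerRange = DEFAULT_NAMES_PER_RANGE;
#   }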

# -- harvest method messages --
tapirharvester.start.search.prepareDirectory=Start preparing directory for search
tapirharvester.end.search.prepareDirectory=Finished preparing directory for search
tapirharvester.start.search.range=Start harvesting range [ {0} - {1} ]
tapirharvester.success.search.range=Success harvesting range [ {0} - {1} ]
tapirharvester.search.requestNamespace=The request content namespace that will be used is: {0}
tapirharvester.search.paging=Not all search records could be returned: firing a new request with the start index set to: {0}

tapirharvester.search.maxResponseSize=The maximum number of search records returned in a single response has been set to {0}
tapirharvester.search.fractioned=RETRY harvesting range [ {0} - {1} ] for next {2} records by setting [&start={3}] in the URL
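
# The paging and fractioned messages above describe re-issuing a search when a
# response was truncated, advancing a start index in the request URL. A minimal
# sketch of that retry URL, assuming a plain query-string append (the variable
# names are illustrative):
#
#   // retry the same range [lower - upper] for the next batch of records
#   String retryUrl = searchUrl + "&start=" + startIndex;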

# -- harvest method error messages --
tapirharvester.error.search.prepareDirectory=Error preparing directory for search: {0}
tapirharvester.error.search.range=Error harvesting range [ {0} - {1} ]
tapirharvester.error.search.iterator=Line iterator over name ranges file could not be created: {0}
tapirharvester.error.search.append=Failed name ranges could not be appended to the pending name ranges file: {0}
tapirharvester.error.search.writeFailed=Failed name range could not be written to the failed name ranges file: {0}
tapirharvester.error.search.skipRange=Skipping range: [ {0} - {1} ]
tapirharvester.error.search.overwrite=Name ranges file could not be overwritten with failed name ranges file: {0}
tapirharvester.error.search.reverse=Name ranges file could not be reverse engineered: {0}
tapirharvester.error.search.buildUrl=Search request could not be constructed for range [ {0} - {1} ]: {2}
tapirharvester.error.search.writeRequest=Search request file could not be written for range [ {0} - {1} ]: {2}
tapirharvester.error.search.writeResponse=Search response file could not be written for range [ {0} - {1} ]: {2}
tapirharvester.error.search.writePending=Pending range file could not be written: {0}
tapirharvester.error.search.fileNotFound=Search response file could not be found for range [ {0} - {1} ]: {2}
tapirharvester.error.search.closeFis=File input stream of response file could not be closed for range [ {0} - {1} ]: {2}
tapirharvester.error.search.mapping=Problem loading request namespace mapping file: {0}
tapirharvester.error.search.encode=Problem encoding the names of name range [ {0} & {1} ]: {2}

tapirharvester.error.search.maxResponseSize=Error reading maxSearchResponseRecords ({0}); please check that it has been set properly by the provider
tapirharvester.error.search.maxResponseSize.default=Defaulting maximum number of search records returned in a single response to {0}
tapirharvester.error.search.fractioned=The RETRY FAILED while harvesting range [ {0} - {1} ] for next {2} records with [&start={3}] in the URL: {4}

# -- processHarvested method messages --
tapirharvester.processHarvested=Process harvested records
tapirharvester.harvestSiblingUnits=Harvest sibling units
tapirharvester.processSiblingUnits=Process sibling units
tapirharvester.start.processHarvested=Start process harvested
tapirharvester.end.processHarvested=Finished process harvested
tapirharvester.start.processHarvested.prepareDirectory=Start preparing directory for process harvested
tapirharvester.end.processHarvested.prepareDirectory=Finished preparing directory for process harvested
tapirharvester.processHarvested.write=Header line of harvested tab file has been written successfully


# -- processHarvested method error messages --
tapirharvester.error.processHarvested=Harvested records could not be processed: {0}
tapirharvester.error.processHarvested.createFis=File writers for the harvested files could not be created
tapirharvester.error.processHarvested.writeError=Header line of harvested tab file could not be written
tapirharvester.error.processHarvested.closeFis=File input stream of harvested tab file could not be closed
tapirharvester.error.processHarvested.prepareDirectory=Error preparing directory for process harvested: {0}
tapirharvester.error.processHarvested.mapping=Problem reading index mapping file: {0}
tapirharvester.error.processHarvested.createFiles=Not all harvested record output files could be created
tapirharvester.error.processHarvested.writeHeaders=Not all column header lines could be written to the record output files
tapirharvester.error.processHarvested.setXPath=Problem setting XPaths

# -- helper method messages --
tapirharvester.error.createNameRanges=Encountered an invalid line in the inventory file: see line {0} of {1}
tapirharvester.error.setValues=A problem occurred while reading mapping file {0}: {1}
tapirharvester.error.mappingFileExists=Mapping file resource {0} does not exist
tapirharvester.error.populateElementOfInterestsMapsFromMappingFile=A problem occurred while analysing elements of interest from mapping file {0}: {1}
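
# The "line iterator" wording in tapirharvester.error.search.iterator suggests
# Apache Commons IO; a sketch of walking the name ranges file under that
# assumption (the file name and encoding are illustrative):
#
#   import java.io.File;
#   import org.apache.commons.io.FileUtils;
#   import org.apache.commons.io.LineIterator;
#
#   LineIterator it = FileUtils.lineIterator(new File("name_ranges.txt"), "UTF-8");
#   try {
#       while (it.hasNext()) {
#           String line = it.nextLine(); // one name range per line
#       }
#   } finally {
#       LineIterator.closeQuietly(it);
#   }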

##################################################
# Harvester-dwcarchive
##################################################
dwcarchive.name=DwC Archive (compressed archive, single data text file, or meta.xml file)
dwcarchive.url=URL
dwcarchive.outputDirectory=Output directory
dwcarchive.download=Download
dwcarchive.processHarvested=Process harvested records
dwcarchive.harvestSiblingUnits=Harvest sibling units
dwcarchive.processSiblingUnits=Process sibling units
dwcarchive.numCoreTerms=Number of core terms: {0}
dwcarchive.numConstantCoreTerms=Number of core terms that represent constants: {0}

# -- download method messages --
dwcarchive.download.start=<<< Starting download {0}
dwcarchive.download.remove=Removing previously existing directory {0}
dwcarchive.download.singleText=Downloading single text file without meta.xml {0}
dwcarchive.download.singleMeta=Downloading single meta.xml file {0}
dwcarchive.download.singleArchive=Downloading single archive file {0}
dwcarchive.download.decompress=Decompressing DwC archive {0}
dwcarchive.download.end=<b>>>> Download finished</b>

# -- download method error messages --
dwcarchive.error.download=An error occurred during the download: {0}

# -- processHarvested method messages --
dwcarchive.start.processHarvested=<<< Starting DwC archive processing
dwcarchive.end.processHarvested=<b>>>> DwC archive processing finished</b>
dwcarchive.processHarvested.openArchive=Opening archive: {0}
dwcarchive.start.processHarvested.prepareDirectory=Start preparing directory for process harvested
dwcarchive.end.processHarvested.prepareDirectory=Finished preparing directory for process harvested
dwcarchive.end.processHarvested.writeOutputFile=Writing to file: {0}
dwcarchive.processHarvested.write=Header line of harvested tab file has been written successfully
dwcarchive.processHarvested.coreFiles=Core file(s) found: {0}
dwcarchive.processHarvested.coreRowType=Core row type: {0}
dwcarchive.processHarvested.coreIdColumn=Core identifier column: {0}
dwcarchive.processHarvested.validate=Found valid concept term: {0}
dwcarchive.processHarvested.termIndex=Core file has term (of interest) {0} at index: {1}
dwcarchive.processHarvested.termConstant=Core file has constant term (of interest) {0} with value: {1}
dwcarchive.processHarvested.operating=Operating on the core file: {0}
dwcarchive.processHarvested.terminatorType=Core file has terminator of type {0}
dwcarchive.processHarvested.fileType=Core file is of type {0}
dwcarchive.processHarvested.remoteFile=A remote TEXT file has been described and will be retrieved from: {0}
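
# The messages above track what is read from an archive's meta.xml: core file
# location, row type, identifier column, and per-term column indices. A sketch
# using the GBIF dwca-io library; this library is an assumption, since this
# file does not name it, and class and package names vary between versions:
#
#   import java.io.File;
#   import org.gbif.dwca.io.Archive;
#   import org.gbif.dwca.io.ArchiveFactory;
#
#   Archive archive = ArchiveFactory.openArchive(new File("/path/to/archive"));
#   // cf. dwcarchive.processHarvested.coreRowType
#   System.out.println(archive.getCore().getRowType());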

# -- processHarvested method error messages --
dwcarchive.error.processHarvested.directory=Cannot find archive directory: {0}
dwcarchive.error.processHarvested.openArchive=Problem opening archive: {0}
dwcarchive.error.processHarvested.unsupportedArchive=The specified archive has an unsupported format: {0}
dwcarchive.error.processHarvested.core=Cannot locate the core data file
dwcarchive.error.processHarvested.indexMapping=Could not finish reading the indexMapping file for the core data file ({0}): 
dwcarchive.error.processHarvested.validate=Concept term {0} couldn't be validated against the list of DwC/GBIF/IUCN/IPT terms
dwcarchive.error.processHarvested.output=Output file {0} could not be created because directory {1} does not exist
dwcarchive.error.processHarvested.prepareDirectory=Error preparing directory for processHarvested: {0}
dwcarchive.error.processHarvested.createOutputFile=Error creating output file ({0}): {1}
dwcarchive.error.processHarvested.badRow=A bad row was encountered on line {0}: {1}
dwcarchive.error.processHarvested.outOfBounds=Index out of bounds error on line {0}. Does the file ({1}) contain the same number of indices as specified in the meta file?
dwcarchive.error.processHarvested=Harvested records could not be processed: {0}
dwcarchive.error.processHarvested.noIndex=Index {0} does not exist
dwcarchive.error.processHarvested.checkIndex=Please check whether index {0} exists
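
# The badRow / outOfBounds / noIndex errors above arise when a data row has
# fewer columns than the indices declared in the meta file. A minimal sketch of
# the guard, assuming tab-separated rows (the variable names are illustrative):
#
#   String[] columns = row.split("\t", -1); // -1 keeps trailing empty fields
#   if (termIndex >= columns.length) {
#       // corresponds to dwcarchive.error.processHarvested.outOfBounds
#       throw new IndexOutOfBoundsException("line " + lineNumber);
#   }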

dwcarchive.error.mappingFileExists=Mapping file resource {0} does not exist


#abcdarchive
abcdarchive.harvest=Harvesting ABCD archive
abcdarchive.processHarvested=Processing ABCD archive
abcdarchive.download=Downloading ABCD archive
abcdarchive.download.start=<<< Starting download of ABCD archive
abcdarchive.download.end=<b>>>> ABCD archive downloaded</b>
abcdarchive.download.remove=Deleting the ABCD archive
abcdarchive.download.singleArchive=Downloading single archive file {0}
abcdarchive.download.decompress=Unpacking archive file and calculating checksums
abcdarchive.download.checksum=Calculating checksum {0} 
abcdarchive.download.singleText=Downloading text file {0}
abcdarchive.error.download=An error occurred during the download: {0}
abcdarchive.processHarvested.start=<<< Starting XML processing
abcdarchive.processHarvested.end=<b>>>> XML processing finished</b>
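
# abcdarchive.download.decompress and abcdarchive.download.checksum above
# mention checksum calculation while unpacking. A minimal sketch with the JDK's
# MessageDigest (the SHA-1 algorithm choice and the file name are assumptions):
#
#   import java.io.InputStream;
#   import java.nio.file.Files;
#   import java.nio.file.Paths;
#   import java.security.DigestInputStream;
#   import java.security.MessageDigest;
#
#   MessageDigest md = MessageDigest.getInstance("SHA-1");
#   try (InputStream in = new DigestInputStream(
#           Files.newInputStream(Paths.get("unit.xml")), md)) {
#       while (in.read() != -1) { /* stream through to update the digest */ }
#   }
#   byte[] checksum = md.digest();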