File diff-503.patch of Package Bacula

diff --git a/bacula/autoconf/configure.in b/bacula/autoconf/configure.in
index 0ce9d94..7db5167 100644
--- a/bacula/autoconf/configure.in
+++ b/bacula/autoconf/configure.in
@@ -1755,11 +1755,22 @@ LIBS="${saved_LIBS} ${SQL_LFLAGS}"
    
 dnl Check if postgresql can support batch mode
 if test x$DB_TYPE = xpostgresql; then
+   support_batch_insert=yes
    AC_CHECK_LIB(pq, PQisthreadsafe, AC_DEFINE(HAVE_PQISTHREADSAFE, 1, [Set if have PQisthreadsafe]))
    AC_CHECK_LIB(pq, PQputCopyData, AC_DEFINE(HAVE_PQ_COPY, 1, [Set if have PQputCopyData]))
-   if test "x$ac_cv_lib_pq_PQputCopyData" != "xyes"
-    then
-	support_batch_insert=no
+   test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"
+   pkg=$?
+   if test $pkg = 0; then
+      AC_ARG_ENABLE(batch-insert,
+	 AC_HELP_STRING([--enable-batch-insert], [enable the DB batch insert code @<:@default=no@:>@]),
+	 [
+	     if test x$enableval = xno; then
+		support_batch_insert=no
+	     fi
+	 ]
+      )
+   else
+      support_batch_insert=no
    fi
 fi
 
@@ -1771,7 +1782,7 @@ if test x$DB_TYPE = xdbi; then
    if test $DB_PROG = postgresql; then
       AC_CHECK_LIB(pq, PQisthreadsafe, AC_DEFINE(HAVE_PQISTHREADSAFE))
       AC_CHECK_LIB(pq, PQputCopyData, AC_DEFINE(HAVE_PQ_COPY))
-      test "x$ac_cv_lib_pq_PQputCopyData" != "xyes"
+      test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"
       pkg=$?
    fi
 
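Note on the two configure.in hunks above: they lean on a plain-shell idiom rather than a dedicated autoconf macro. 'test' exits with status 0 when its expression is true, '$?' captures that status into 'pkg', and the value then decides whether batch insert can be offered at all. (The one-character fix in the second hunk matters: the old '!= "xyes"' comparison left pkg at 0 exactly when PQputCopyData was *missing*, inverting the meaning of any later 'test $pkg = 0' check.) A minimal standalone sketch of the same control flow; the variable names come from the patch, but the hard-coded values are illustrative stand-ins for the AC_CHECK_LIB result and the --enable-batch-insert option:

   #!/bin/sh
   ac_cv_lib_pq_PQputCopyData=yes   # stand-in for the AC_CHECK_LIB cache result
   enableval=no                     # stand-in for --enable-batch-insert=no

   support_batch_insert=yes
   test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"
   pkg=$?                           # 0 (true) when libpq has PQputCopyData
   if test $pkg = 0; then
      # batch insert is possible; honor an explicit opt-out
      if test x$enableval = xno; then
         support_batch_insert=no
      fi
   else
      # without PQputCopyData, batch mode cannot work at all
      support_batch_insert=no
   fi
   echo "support_batch_insert=$support_batch_insert"
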
diff --git a/bacula/configure b/bacula/configure
index 1c24d95..ec64487 100755
--- a/bacula/configure
+++ b/bacula/configure
@@ -29048,6 +29048,7 @@ saved_LIBS="${LIBS}"
 LIBS="${saved_LIBS} ${SQL_LFLAGS}"
 
 if test x$DB_TYPE = xpostgresql; then
+   support_batch_insert=yes
    { echo "$as_me:$LINENO: checking for PQisthreadsafe in -lpq" >&5
 echo $ECHO_N "checking for PQisthreadsafe in -lpq... $ECHO_C" >&6; }
 if test "${ac_cv_lib_pq_PQisthreadsafe+set}" = set; then
@@ -29186,9 +29187,21 @@ _ACEOF
 
 fi
 
-   if test "x$ac_cv_lib_pq_PQputCopyData" != "xyes"
-    then
-	support_batch_insert=no
+   test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"
+   pkg=$?
+   if test $pkg = 0; then
+      # Check whether --enable-batch-insert was given.
+if test "${enable_batch_insert+set}" = set; then
+  enableval=$enable_batch_insert;
+	     if test x$enableval = xno; then
+		support_batch_insert=no
+	     fi
+
+
+fi
+
+   else
+      support_batch_insert=no
    fi
 fi
 
@@ -29333,7 +29346,7 @@ _ACEOF
 
 fi
 
-      test "x$ac_cv_lib_pq_PQputCopyData" != "xyes"
+      test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"
       pkg=$?
    fi
 
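The bacula/configure hunks above repeat the configure.in change because configure is a generated file: autoconf expands AC_ARG_ENABLE into the inline --enable-batch-insert block seen here, so the two files have to be patched in lockstep, or configure regenerated after editing configure.in. A hedged sketch of the regeneration step, assuming a stock autoconf installation; Bacula's own tree may wrap this in a Makefile target and add local include paths:

   #!/bin/sh
   cd bacula/autoconf
   # assumption: plain autoconf suffices; Bacula may need --prepend-include
   # for its bundled m4 macros
   autoconf -o ../configure configure.in
   chmod +x ../configure
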
diff --git a/bacula/platforms/rpm/bacula-bat.spec b/bacula/platforms/rpm/bacula-bat.spec
index 2c57c6e..a1abad0 100644
--- a/bacula/platforms/rpm/bacula-bat.spec
+++ b/bacula/platforms/rpm/bacula-bat.spec
@@ -1,6 +1,6 @@
 # Bacula RPM spec file
 #
-# Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+# Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
 
 # Platform Build Configuration
 
@@ -92,7 +92,7 @@ Name: bacula-bat
 Version: %{_version}
 Release: %{_release}
 Group: System Environment/Daemons
-License: GPL v2
+License: AGPLv3
 BuildRoot: %{_tmppath}/%{name}-root
 URL: http://www.bacula.org/
 Vendor: The Bacula Team
@@ -136,7 +136,7 @@ BuildRequires: freetype-devel
 %define blurb5 it is a network client/server based backup program. Bacula is relatively
 %define blurb6 easy to use and efficient, while offering many advanced storage management
 %define blurb7 features that make it easy to find and recover lost or damaged files.
-%define blurb8 Bacula source code has been released under the GPL version 2 license.
+%define blurb8 Bacula source code has been released under the AGPL version 3 license.
 
 Summary: Bacula - The Network Backup Solution
 Group: System Environment/Daemons
diff --git a/bacula/platforms/rpm/bacula-docs.spec b/bacula/platforms/rpm/bacula-docs.spec
index e1d22cb..32c91c4 100644
--- a/bacula/platforms/rpm/bacula-docs.spec
+++ b/bacula/platforms/rpm/bacula-docs.spec
@@ -1,6 +1,6 @@
 # Bacula RPM spec file
 #
-# Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+# Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
 
 # Platform Build Configuration
 
@@ -26,7 +26,7 @@ Name: bacula-docs
 Version: %{_version}
 Release: %{_release}
 Group: System Environment/Daemons
-License: GPL v2
+License: AGPLv3
 BuildRoot: %{_tmppath}/%{name}-root
 URL: http://www.bacula.org/
 Vendor: The Bacula Team
@@ -48,7 +48,7 @@ Source: %{name}-%{_version}.tar.bz2
 %define blurb5 it is a network client/server based backup program. Bacula is relatively
 %define blurb6 easy to use and efficient, while offering many advanced storage management
 %define blurb7 features that make it easy to find and recover lost or damaged files.
-%define blurb8 Bacula source code has been released under the GPL version 2 license.
+%define blurb8 Bacula source code has been released under the AGPL version 3 license.
 
 Summary: Bacula - The Network Backup Solution
 Group: System Environment/Daemons
diff --git a/bacula/platforms/rpm/bacula-mtx.spec b/bacula/platforms/rpm/bacula-mtx.spec
index 5b260ce..64ab00d 100644
--- a/bacula/platforms/rpm/bacula-mtx.spec
+++ b/bacula/platforms/rpm/bacula-mtx.spec
@@ -41,7 +41,7 @@ Name: bacula-mtx
 Version: %{_version}
 Release: %{_release}
 Group: System Environment/Daemons
-License: GPL v2
+License: AGPLv3
 BuildRoot: %{_tmppath}/%{name}-root
 URL: http://www.bacula.org/
 Vendor: The Bacula Team
@@ -59,7 +59,7 @@ Source: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.t
 %define blurb5 it is a network client/server based backup program. Bacula is relatively
 %define blurb6 easy to use and efficient, while offering many advanced storage management
 %define blurb7 features that make it easy to find and recover lost or damaged files.
-%define blurb8 Bacula source code has been released under the GPL version 2 license.
+%define blurb8 Bacula source code has been released under the AGPL version 3 license.
 
 Summary: Bacula - The Network Backup Solution
 Group: System Environment/Daemons
diff --git a/bacula/platforms/rpm/bacula.spec b/bacula/platforms/rpm/bacula.spec
index 2a23556..bbfb066 100644
--- a/bacula/platforms/rpm/bacula.spec
+++ b/bacula/platforms/rpm/bacula.spec
@@ -6,10 +6,16 @@
 
 # basic defines for every build
 %define _release           1
-%define _version           5.0.2
+%define _version           5.0.3
 %define _packager D. Scott Barninger <barninger@fairfieldcomputers.com>
 %define depkgs_version 18Dec09
 
+%define postgres_version 7
+%define postgres_package postgresql
+%define postgres_server_package postgresql-server
+%define postgres_devel_package postgresql-devel
+
+
 %define single_dir 0
 %{?single_dir_install:%define single_dir 1}
 
@@ -19,7 +25,7 @@
 %define _sbindir       /opt/bacula/bin
 %define _bindir        /opt/bacula/bin
 %define _subsysdir     /opt/bacula/working
-%define sqlite_bindir /opt/bacula/sqlite
+%define sqlite_bindir  /opt/bacula/sqlite
 %define _mandir        /usr/share/man
 %define sysconf_dir    /opt/bacula/etc
 %define script_dir     /opt/bacula/scripts
@@ -217,7 +223,7 @@ Name: bacula
 Version: %{_version}
 Release: %{_release}
 Group: System Environment/Daemons
-License: GPL v2
+License: AGPLv3
 BuildRoot: %{_tmppath}/%{name}-root
 URL: http://www.bacula.org/
 Vendor: The Bacula Team
@@ -232,8 +238,7 @@ Source1: Release_Notes-%{version}-1.tar.gz
 %else
 Source1: Release_Notes-%{version}-%{release}.tar.gz
 %endif
-Source2: bacula-2.2.7-postgresql.patch
-Source3: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.tar.gz
+Source2: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.tar.gz
 
 # define the basic package description
 %define blurb Bacula - The Leading Open Source Backup Solution.
@@ -243,7 +248,7 @@ Source3: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.
 %define blurb5 it is a network client/server based backup program. Bacula is relatively
 %define blurb6 easy to use and efficient, while offering many advanced storage management
 %define blurb7 features that make it easy to find and recover lost or damaged files.
-%define blurb8 Bacula source code has been released under the GPL version 2 license.
+%define blurb8 Bacula source code has been released under the AGPL version 3 license.
 
 %define user_file  /etc/passwd
 %define group_file /etc/group
@@ -447,8 +452,8 @@ BuildRequires: mysql-client
 BuildRequires: mysql
 %endif
 %if 0%{?opensuse_bs} &&  %{suse} && %{postgresql}
-BuildRequires: postgresql
-BuildRequires: postgresql-server
+BuildRequires: %{postgres_package}
+BuildRequires: %{postgres_server_package}
 %endif
 BuildRequires: openssl
 
@@ -521,11 +526,11 @@ BuildRequires: mysql-devel
 %endif
 
 %if %{postgresql} && %{wb3}
-BuildRequires: rh-postgresql-devel >= 7
+BuildRequires: rh-postgresql-devel >= %{postgres_version}
 %endif
 
 %if %{postgresql} && ! %{wb3}
-BuildRequires: postgresql-devel >= 7
+BuildRequires: %{postgres_devel_package} >= %{postgres_version}
 %endif
 
 %description
@@ -555,7 +560,7 @@ Provides: bacula-dir, bacula-sd, bacula-fd, bacula-server
 Conflicts: bacula-client
 
 Requires: ncurses, libstdc++, zlib, openssl
-Requires: glibc, readline, bacula-libs
+Requires: glibc, readline, %{name}-libs
 
 %if %{suse}
 Conflicts: bacula
@@ -628,7 +633,7 @@ Provides: bacula
 %endif
 
 Requires: libstdc++, zlib, openssl
-Requires: glibc, readline, bacula-libs
+Requires: glibc, readline, %{name}-libs
 
 %if %{suse}
 Requires: termcap
@@ -708,7 +713,6 @@ This package installs the shared libraries used by many bacula programs.
 %prep
 %setup
 %setup -T -D -b 1
-%setup -T -D -b 3
 
 %build
 
@@ -723,9 +727,6 @@ make sqlite3
 cd ${cwd}
 %endif
 
-%if %{wb3} || %{old_pgsql}
-patch -p3 src/cats/postgresql.c < %SOURCE2
-%endif
 
 %if %{sqlite}
 # patches for the bundled sqlite scripts
@@ -963,6 +964,7 @@ rm -f $RPM_BUILD_DIR/Release_Notes-%{version}-%{release}.txt
 %attr(-, root, %{daemon_group}) %{script_dir}/grant_mysql_privileges
 %attr(-, root, %{daemon_group}) %{script_dir}/startmysql
 %attr(-, root, %{daemon_group}) %{script_dir}/stopmysql
+%{_libdir}/libbacsql*
 %endif
 
 %if %{sqlite}
@@ -990,6 +992,7 @@ rm -f $RPM_BUILD_DIR/Release_Notes-%{version}-%{release}.txt
 %attr(-, root, %{daemon_group}) %{script_dir}/drop_postgresql_tables
 %attr(-, root, %{daemon_group}) %{script_dir}/update_postgresql_tables
 %attr(-, root, %{daemon_group}) %{script_dir}/grant_postgresql_privileges
+%{_libdir}/libbacsql*
 %endif
 
 # The rest is DB backend independent
@@ -1011,6 +1014,7 @@ rm -f $RPM_BUILD_DIR/Release_Notes-%{version}-%{release}.txt
 %attr(-, root, %{daemon_group}) %{script_dir}/delete_catalog_backup
 %attr(-, root, %{daemon_group}) %{script_dir}/btraceback.dbx
 %attr(-, root, %{daemon_group}) %{script_dir}/btraceback.gdb
+%attr(-, root, %{daemon_group}) %{script_dir}/btraceback.mdb
 %attr(-, root, %{daemon_group}) %{script_dir}/disk-changer
 %attr(-, root, %{daemon_group}) %{script_dir}/bacula-ctl-dir
 %attr(-, root, %{daemon_group}) %{script_dir}/bacula-ctl-fd
@@ -1288,7 +1292,7 @@ if [ -z "$DB_VER" ]; then
     echo "Granting privileges for PostgreSQL user bacula..."
     %{script_dir}/grant_postgresql_privileges
 
-# check to see if we need to upgrade a 3.x database
+# check to see if we need to upgrade to a 5.0.x database
 elif [ "$DB_VER" -lt "12" ]; then
     echo "This release requires an upgrade to your bacula database."
     echo "Backing up your current database..."
@@ -1421,7 +1425,8 @@ fi
 
 %files libs
 %defattr(-,root,root)
-%{_libdir}/libbac*
+%{_libdir}/libbac-*
+%{_libdir}/libbac.*
 %{_libdir}/libbaccfg*
 %{_libdir}/libbacfind*
 %{_libdir}/libbacpy*
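
A plausible reading of the %files changes in bacula.spec above: libbacsql now ships in the per-backend file lists (the two %{_libdir}/libbacsql* additions), so the libs subpackage can no longer use the old catch-all %{_libdir}/libbac*, which would also have matched libbacsql and put the same file in two subpackages. The replacement pair libbac-* and libbac.* matches only the core library. A quick shell check of the glob behavior, using illustrative file names in a scratch directory:

   #!/bin/sh
   dir=$(mktemp -d) && cd "$dir"
   touch libbac-5.0.3.so libbac.so libbac.la \
         libbacsql-5.0.3.so libbaccfg-5.0.3.so
   echo 'old glob:';  ls libbac*            # matches everything, incl. libbacsql
   echo 'new globs:'; ls libbac-* libbac.*  # matches only the core libbac files
   cd / && rm -rf "$dir"
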
diff --git a/bacula/projects b/bacula/projects
index a7334e2..fd6f1e7 100644
--- a/bacula/projects
+++ b/bacula/projects
@@ -1,51 +1,55 @@
                 
 Projects:
                      Bacula Projects Roadmap 
-                    Status updated 25 February 2010
+                    Status updated 8 August 2010
 
 Summary:
 * => item complete
 
 Item  1: Ability to restart failed jobs
-Item  2: Scheduling syntax that permits more flexibility and options
-Item  3: Data encryption on storage daemon
-Item  4: Add ability to Verify any specified Job.
-Item  5: Improve Bacula's tape and drive usage and cleaning management
-Item  6: Allow FD to initiate a backup
-Item  7: Implement Storage daemon compression
-Item  8: Reduction of communications bandwidth for a backup
-Item  9: Ability to reconnect a disconnected comm line
-Item 10: Start spooling even when waiting on tape
-Item 11: Include all conf files in specified directory
-Item 12: Multiple threads in file daemon for the same job
-Item 13: Possibilty to schedule Jobs on last Friday of the month
-Item 14: Include timestamp of job launch in "stat clients" output
-Item 15: Message mailing based on backup types
-Item 16: Ability to import/export Bacula database entities
-Item 17: Implementation of running Job speed limit.
-Item 18: Add an override in Schedule for Pools based on backup types
-Item 19: Automatic promotion of backup levels based on backup size
-Item 20: Allow FileSet inclusion/exclusion by creation/mod times
-Item 21: Archival (removal) of User Files to Tape
-Item 22: An option to operate on all pools with update vol parameters
-Item 23: Automatic disabling of devices
-Item 24: Ability to defer Batch Insert to a later time
-Item 25: Add MaxVolumeSize/MaxVolumeBytes to Storage resource
-Item 26: Enable persistent naming/number of SQL queries
-Item 27: Bacula Dir, FD and SD to support proxies
-Item 28: Add Minumum Spool Size directive
-Item 29: Handle Windows Encrypted Files using Win raw encryption
-Item 30: Implement a Storage device like Amazon's S3.
-Item 31: Convert tray monitor on Windows to a stand alone program
-Item 32: Relabel disk volume after recycling
-Item 33: Command that releases all drives in an autochanger
-Item 34: Run bscan on a remote storage daemon from within bconsole.
-Item 35: Implement a Migration job type that will create a reverse
-Item 36: Job migration between different SDs
-Item 37: Concurrent spooling and despooling withini a single job.
-Item 39: Extend the verify code to make it possible to verify
-Item 40: Separate "Storage" and "Device" in the bacula-dir.conf
-Item 41: Least recently used device selection for tape drives in autochanger.
+Item  2: SD redesign
+Item  3: NDMP backup/restore
+Item  4: SAP backup/restore
+Item  5: Oracle backup/restore
+Item  6: Zimbra and Zarafa backup/restore
+Item  7: Include timestamp of job launch in "stat clients" output
+Item  8: Include all conf files in specified directory
+Item  9: Reduction of communications bandwidth for a backup
+Item 10: Concurrent spooling and despooling within a single job.
+Item 11: Start spooling even when waiting on tape
+Item 12: Add ability to Verify any specified Job.
+Item 13: Data encryption on storage daemon
+Item 14: Possibility to schedule Jobs on last Friday of the month
+Item 15: Scheduling syntax that permits more flexibility and options
+Item 16: Ability to defer Batch Insert to a later time
+Item 17: Add MaxVolumeSize/MaxVolumeBytes to Storage resource
+Item 18: Message mailing based on backup types
+Item 19: Handle Windows Encrypted Files using Win raw encryption
+Item 20: Job migration between different SDs
+Item 19: Allow FD to initiate a backup
+Item 21: Implement Storage daemon compression
+Item 22: Ability to import/export Bacula database entities
+Item 23: Implementation of running Job speed limit.
+Item 24: Add an override in Schedule for Pools based on backup types
+Item 25: Automatic promotion of backup levels based on backup size
+Item 26: Allow FileSet inclusion/exclusion by creation/mod times
+Item 27: Archival (removal) of User Files to Tape
+Item 28: Ability to reconnect a disconnected comm line
+Item 29: Multiple threads in file daemon for the same job
+Item 30: Automatic disabling of devices
+Item 31: Enable persistent naming/number of SQL queries
+Item 32: Bacula Dir, FD and SD to support proxies
+Item 33: Add Minimum Spool Size directive
+Item 34: Command that releases all drives in an autochanger
+Item 35: Run bscan on a remote storage daemon from within bconsole.
+Item 36: Implement a Migration job type that will create a reverse
+Item 37: Extend the verify code to make it possible to verify
+Item 38: Separate "Storage" and "Device" in the bacula-dir.conf
+Item 39: Least recently used device selection for tape drives in autochanger.
+Item 40: Implement a Storage device like Amazon's S3.
+Item 41: Convert tray monitor on Windows to a stand alone program
+Item 42: Improve Bacula's tape and drive usage and cleaning management
+Item 43: Relabel disk volume after recycling
 
 
 Item  1: Ability to restart failed jobs
@@ -68,8 +72,312 @@ Item  1: Ability to restart failed jobs
   Notes: Requires Accurate to restart correctly.  Must completed have a minimum
           volume of data or files stored on Volume before enabling.
 
+Item  2: SD redesign
+   Date: 8 August 2010
+ Origin: Kern
+ Status: 
+
+  What: Various ideas for redesigns planned for the SD:
+   1. One thread per drive
+   2. Design a class structure for all objects in the SD.
+   3. Make Device into C++ classes for each device type
+   4. Make Device have a proxy (front end intercept class) that will
+      permit control over locking and changing the real device pointer.
+      It can also permit delaying opening, so that we can adapt to
+      having another program that tells us the Archive device name.
+   5. Allow plugins to create new on the fly devices
+   6. Separate SD volume manager
+   7. Volume manager tells Bacula what drive or device to use for a given volume
+  
+  Why:  It will simplify the SD, make it more modular, reduce locking
+        conflicts, and allow multiple buffer backups.
+
+
+Item  3: NDMP backup/restore                                           
+   Date: 8 August 2010
+ Origin: Bacula Systems
+ Status: Enterprise only if implemented by Bacula Systems
+
+  What:  Backup/restore via NDMP -- most importantly, NetApp compatibility
+
+
+
+Item  4: SAP backup/restore                                           
+   Date: 8 August 2010
+ Origin: Bacula Systems
+ Status: Enterprise only if implemented by Bacula Systems
+
+  What:  Backup/restore SAP databases (MaxDB, Oracle, possibly DB2)
+
+
+
+Item  5: Oracle backup/restore                                           
+   Date: 8 August 2010
+ Origin: Bacula Systems
+ Status: Enterprise only if implemented by Bacula Systems
+
+  What:  Backup/restore Oracle databases
+
+
+Item  6: Zimbra and Zarafa backup/restore
+   Date: 8 August 2010
+ Origin: Bacula Systems
+ Status: Enterprise only if implemented by Bacula Systems
+
+  What:  Backup/restore for Zimbra and Zarafa
+
+
+
+Item  7: Include timestamp of job launch in "stat clients" output
+  Origin: Mark Bergman <mark.bergman@uphs.upenn.edu>
+  Date:  Tue Aug 22 17:13:39 EDT 2006
+  Status:
+
+  What:  The "stat clients" command doesn't include any detail on when
+          the active backup jobs were launched.
+
+  Why:   Including the timestamp would make it much easier to decide whether
+          a job is running properly. 
+
+  Notes: It may be helpful to have the output from "stat clients" formatted 
+          more like that from "stat dir" (and other commands), in a column
+          format. The per-client information that's currently shown (level,
+          client name, JobId, Volume, pool, device, Files, etc.) is good, but
+          somewhat hard to parse (both programmatically and visually), 
+          particularly when there are many active clients.
+
+
+Item  8: Include all conf files in specified directory
+Date:  18 October 2008
+Origin: Database, Lda. Maputo, Mozambique
+Contact: Cameron Smith / cameron.ord@database.co.mz
+Status: New request
+
+What: A directive something like "IncludeConf = /etc/bacula/subconfs".  Every
+      time Bacula Director restarts or reloads, it will walk the given
+      directory (non-recursively) and include the contents of any files
+      therein, as though they were appended to bacula-dir.conf
+
+Why: Permits simplified and safer configuration for larger installations with
+      many client PCs.  Currently, through judicious use of JobDefs and
+      similar directives, it is possible to reduce the client-specific part of
+      a configuration to a minimum.  The client-specific directives can be
+      prepared according to a standard template and dropped into a known
+      directory.  However it is still necessary to add a line to the "master"
+      (bacula-dir.conf) referencing each new file.  This exposes the master to
+      unnecessary risk of accidental mistakes and makes automation of adding
+      new client-confs more difficult (it is easier to automate dropping a
+      file into a dir, than rewriting an existing file).  Ken has previously
+      made a convincing argument for NOT including Bacula's core configuration
+      in an RDBMS, but I believe that the present request is a reasonable
+      extension to the current "flat-file-based" configuration philosophy.
+ 
+Notes: There is NO need for any special syntax to these files.  They should
+       contain standard directives which are simply "inlined" to the parent
+       file as already happens when you explicitly reference an external file.
+
+Notes: (kes) this can already be done with scripting
+     From: John Jorgensen <jorgnsn@lcd.uregina.ca>
+     The bacula-dir.conf at our site contains these lines:
+
+   #
+   # Include subfiles associated with configuration of clients.
+   # They define the bulk of the Clients, Jobs, and FileSets.
+   #
+   @|"sh -c 'for f in /etc/bacula/clientdefs/*.conf ; do echo @${f} ; done'"
+
+    and when we get a new client, we just put its configuration into
+    a new file called something like:
+
+    /etc/bacula/clientdefs/clientname.conf
+
+
+
+
+Item  9: Reduction of communications bandwidth for a backup
+   Date: 14 October 2008
+ Origin: Robin O'Leary (Equiinet)
+ Status: 
+
+  What:  Using rdiff techniques, Bacula could significantly reduce
+          the network data transfer volume to do a backup.
+
+  Why:   Faster backup across the Internet
+
+  Notes: This requires retaining certain data on the client during a Full
+          backup that will speed up subsequent backups.
+     
+
+Item 10: Concurrent spooling and despooling within a single job.
+Date:  17 Nov 2009
+Origin: Jesper Krogh <jesper@krogh.cc>
+Status: NEW
+What:  When a job has spooling enabled and the spool area size is
+       less than the total volume size, the storage daemon will:
+       1) Spool to spool-area
+       2) Despool to tape
+       3) Go to 1 if more data to be backed up.
+
+       Typical disks will serve data with a speed of 100MB/s when
+       dealing with large files; the network is typically capable of 115MB/s
+       (GbitE). Tape drives will despool at 50-90MB/s (LTO3) or 70-120MB/s
+       (LTO4) depending on compression and data.
+
+       As bacula currently works it'll hold back data from the client until
+       de-spooling is done, no matter whether the spool area could handle
+       another block of data. Given a FileSet of 4TB, a spool-area of 100GB,
+       and a Maximum Job Spool Size of 50GB, the above sequence could be
+       changed to allow spooling to the other 50GB while despooling the first
+       50GB, without holding back the client while doing it. As the above
+       numbers show, depending on tape-drive and disk-arrays this can
+       potentially cut the backup time of individual jobs by 50%.
+
+       Real-world example: backing up 112.6GB (large files) to LTO4 tapes
+       (despools at ~75MB/s; data is gzipped on the remote filesystem).
+       Maximum Job Spool Size = 8GB
+
+       Current:
+       Size: 112.6GB
+       Elapsed time (total time): 46m 15s => 2775s
+       Despooling time: 25m 41s => 1541s (55%)
+       Spooling time: 20m 34s => 1234s (45%)
+       Reported speed: 40.58MB/s
+       Spooling speed: 112.6GB/1234s => 91.25MB/s
+       Despooling speed: 112.6GB/1541s => 73.07MB/s
+
+       So disk + net can "keep up" with the LTO4 drive (in this test)
+
+       The proposed change would effectively make the backup run in the
+       "despooling time" of 1541s, a reduction to 55% of the total run time.
+
+       In the situation where an individual job cannot keep up with the
+       LTO drive, spooling enables efficient multiplexing of multiple
+       concurrent jobs onto the same drive.
+
+Why:   When dealing with larger volumes it is important to maximize the
+       general utilization of the network/disk in order to be able to run
+       a full backup over a weekend. The current work-around is to split
+       the FileSet into smaller FileSets and Jobs, but that leads to more
+       configuration management and is harder to review for completeness.
+       It also makes restores more complex.
+
+     
+
+Item 11: Start spooling even when waiting on tape
+  Origin: Tobias Barth <tobias.barth@web-arts.com>
+  Date:  25 April 2008
+  Status:
+
+  What: If a job can be spooled to disk before writing it to tape, it should
+          be spooled immediately.  Currently, bacula waits until the correct
+          tape is inserted into the drive.
+
+  Why:   It could save hours.  When bacula waits on the operator who must insert
+          the correct tape (e.g.  a new tape or a tape from another media
+          pool), bacula could already prepare the spooled data in the spooling
+          directory and immediately start despooling when the tape was
+          inserted by the operator.
+         
+          2nd step: Use 2 or more spooling directories.  When one directory is
+          currently despooling, the next (on different disk drives) could
+          already be spooling the next data.
+
+  Notes: I am using bacula 2.2.8, which has none of those features
+         implemented.
+
+
+Item 12: Add ability to Verify any specified Job.
+Date: 17 January 2008
+Origin: portrix.net Hamburg, Germany.
+Contact: Christian Sabelmann
+Status: 70% of the required Code is part of the Verify function since v. 2.x
+
+   What:
+   The ability to tell Bacula which Job should verify instead of 
+   automatically verify just the last one.
+
+   Why: 
+   It is sad that such a powerful feature as Verify Jobs
+   (VolumeToCatalog) is restricted to be used only with the last backup Job
+   of a client.  Actual users who have to do daily Backups are forced to
+   also do daily Verify Jobs in order to take advantage of this useful
+   feature.  This daily Verify-after-Backup routine is not always desired,
+   and Verify Jobs sometimes have to be scheduled separately (not
+   necessarily in Bacula).  With this feature Admins can verify Jobs once
+   a week or just a few times per month, selecting the Jobs they want to
+   verify.  This feature is not too difficult to implement, taking into
+   account older bug reports and the selection of the Job to be verified.
+          
+   Notes: For the verify Job, the user could select the Job to be verified 
+   from a List of the latest Jobs of a client. It would also be possible to 
+   verify a certain volume.  All of these would naturally apply only for
+   Jobs whose file information is still in the catalog.
+
+
+Item 13: Data encryption on storage daemon
+  Origin: Tobias Barth <tobias.barth at web-arts.com>
+  Date:  04 February 2009
+  Status: new
+
+  What: The storage daemon should be able to do the data encryption that can
+        currently be done by the file daemon.
+
+  Why: This would have 2 advantages: 
+       1) one could encrypt the data of unencrypted tapes by doing a 
+          migration job
+       2) the storage daemon would be the only machine that would have 
+          to keep the encryption keys.
+
+  Notes from Landon:
+          As an addendum to the feature request, here are some crypto  
+          implementation details I wrote up regarding SD-encryption back in Jan  
+          2008:
+          http://www.mail-archive.com/bacula-users@lists.sourceforge.net/msg28860.html
+
+
+
+Item 14: Possibility to schedule Jobs on last Friday of the month
+Origin: Carsten Menke <bootsy52 at gmx dot net>
+Date:   02 March 2008
+Status:
+
+   What: Currently if you want to run your monthly Backups on the last
+           Friday of each month, this is only possible with workarounds (e.g.
+           scripting), as some months have 4 Fridays and some have 5.
+           The same is true if you plan to run your yearly Backups on the
+           last Friday of the year.  It would be nice to have the ability to
+           use the builtin scheduler for this.
+
+   Why:   In many companies the last working day of the week is Friday (or 
+           Saturday), so to get the most data of the month onto the monthly
+           tape, the employees are advised to insert the tape for the
+           monthly backups on the last Friday of the month.
+
+   Notes: To give this complete functionality it would be nice if the
+           "first" and "last" Keywords could be implemented in the
+           scheduler, so it is also possible to run monthly backups on the
+           first Friday of the month and much more.  So if the syntax
+           would expand to this {first|last} {Month|Week|Day|Mo-Fri} of the
+           {Year|Month|Week} you would be able to run really flexible jobs.
+
+           To get a certain Job to run on the last Friday of the Month, for example,
+           one could then write
+
+              Run = pool=Monthly last Fri of the Month at 23:50
+
+              ## Yearly Backup
+
+              Run = pool=Yearly last Fri of the Year at 23:50
 
-Item  2: Scheduling syntax that permits more flexibility and options
+              ## Certain Jobs the last Week of a Month
+
+              Run = pool=LastWeek last Week of the Month at 23:50
+
+              ## Monthly Backup on the last day of the month
+
+              Run = pool=Monthly last Day of the Month at 23:50
+
+Item 15: Scheduling syntax that permits more flexibility and options
    Date: 15 December 2006
   Origin: Gregory Brauer (greg at wildbrain dot com) and
           Florian Schnabel <florian.schnabel at docufy dot de>
@@ -175,125 +480,147 @@ Item  2: Scheduling syntax that permits more flexibility and options
           jobs (via Schedule syntax) into this.
 
 
-Item  3: Data encryption on storage daemon
-  Origin: Tobias Barth <tobias.barth at web-arts.com>
-  Date:  04 February 2009
-  Status: new
+Item 16: Ability to defer Batch Insert to a later time
+   Date: 26 April 2009
+ Origin: Eric
+ Status: 
 
-  What: The storage demon should be able to do the data encryption that can
-        currently be done by the file daemon.
+  What:  Instead of doing a Job Batch Insert at the end of the Job,
+          which might create resource contention with lots of Jobs,
+          defer the insert to a later time.
 
-  Why: This would have 2 advantages: 
-       1) one could encrypt the data of unencrypted tapes by doing a 
-          migration job
-       2) the storage daemon would be the only machine that would have 
-          to keep the encryption keys.
+  Why:   Permits focusing on getting the data onto the Volume and
+          putting the metadata into the Catalog outside the backup
+          window.
 
-  Notes from Landon:
-          As an addendum to the feature request, here are some crypto  
-          implementation details I wrote up regarding SD-encryption back in Jan  
-          2008:
-          http://www.mail-archive.com/bacula-users@lists.sourceforge.net/msg28860.html
+  Notes: Will use the proposed Bacula ASCII database import/export
+          format (i.e. dependent on the import/export entities project).
 
 
-Item  4: Add ability to Verify any specified Job.
-Date: 17 January 2008
-Origin: portrix.net Hamburg, Germany.
-Contact: Christian Sabelmann
-Status: 70% of the required Code is part of the Verify function since v. 2.x
+Item 17: Add MaxVolumeSize/MaxVolumeBytes to Storage resource
+   Origin: Bastian Friedrich <bastian.friedrich@collax.com>
+   Date:  2008-07-09
+   Status: -
 
-   What:
-   The ability to tell Bacula which Job should verify instead of 
-   automatically verify just the last one.
+   What:  SD has a "Maximum Volume Size" statement, which is deprecated and
+           superseded by the Pool resource statement "Maximum Volume Bytes".
+           It would be good if either statement could be used in Storage
+           resources.
 
-   Why: 
-   It is sad that such a powerfull feature like Verify Jobs
-   (VolumeToCatalog) is restricted to be used only with the last backup Job
-   of a client.  Actual users who have to do daily Backups are forced to
-   also do daily Verify Jobs in order to take advantage of this useful
-   feature.  This Daily Verify after Backup conduct is not always desired
-   and Verify Jobs have to be sometimes scheduled.  (Not necessarily
-   scheduled in Bacula).  With this feature Admins can verify Jobs once a
-   Week or less per month, selecting the Jobs they want to verify.  This
-   feature is also not to difficult to implement taking in account older bug
-   reports about this feature and the selection of the Job to be verified.
-          
-   Notes: For the verify Job, the user could select the Job to be verified 
-   from a List of the latest Jobs of a client. It would also be possible to 
-   verify a certain volume.  All of these would naturaly apply only for 
-   Jobs whose file information are still in the catalog.
+   Why:   Pools do not have to be restricted to a single storage type/device;
+           thus, it may be impossible to define Maximum Volume Bytes in the
+           Pool resource.  The old MaxVolSize statement is deprecated, as it
+           is SD side only.  I am using the same pool for different devices.
 
+   Notes: State of idea currently unknown.  Storage resources in the dir
+           config currently translate to very slim catalog entries; these
+           entries would require extensions to implement what is described
+           here.  Quite possibly, numerous other statements that are currently
+           available in Pool resources could be used in Storage resources
+           quite well, too.
 
-Item  5: Improve Bacula's tape and drive usage and cleaning management 
-  Date:  8 November 2005, November 11, 2005
-  Origin: Adam Thornton <athornton at sinenomine dot net>,
-          Arno Lehmann <al at its-lehmann dot de>
+
+Item 18: Message mailing based on backup types
+ Origin: Evan Kaufman <evan.kaufman@gmail.com>
+   Date: January 6, 2006
+ Status:
+
+   What: In the "Messages" resource definitions, allowing messages
+          to be mailed based on the type (backup, restore, etc.) and level
+          (full, differential, etc) of job that created the originating
+          message(s).
+
+ Why:    It would, for example, allow someone's boss to be emailed
+          automatically only when a Full Backup job runs, so he can
+          retrieve the tapes for offsite storage, even if the IT dept.
+          doesn't (or can't) explicitly notify him.  At the same time, his
+          mailbox wouldn't be filled by notifications of Verifies, Restores,
+          or Incremental/Differential Backups (which would likely be kept
+          onsite).
+
+ Notes: One way this could be done is through additional message types, for
+ example:
+
+   Messages {
+     # email the boss only on full system backups
+     Mail = boss@mycompany.com = full, !incremental, !differential, !restore, 
+            !verify, !admin
+     # email us only when something breaks
+     MailOnError = itdept@mycompany.com = all
+   }
+
+   Notes: Kern: This should be rather trivial to implement.
+
+
+Item 19: Handle Windows Encrypted Files using Win raw encryption
+  Origin: Michael Mohr, SAG  Mohr.External@infineon.com
+  Date:  22 February 2008
+  Origin: Alex Ehrlich (Alex.Ehrlich-at-mail.ee)
+  Date:  05 August 2008
   Status:
 
-  What:  Make Bacula manage tape life cycle information, tape reuse
-          times and drive cleaning cycles.
-
-  Why:   All three parts of this project are important when operating
-          backups.
-          We need to know which tapes need replacement, and we need to
-          make sure the drives are cleaned when necessary.  While many
-          tape libraries and even autoloaders can handle all this
-          automatically, support by Bacula can be helpful for smaller
-          (older) libraries and single drives.  Limiting the number of
-          times a tape is used might prevent tape errors when using
-          tapes until the drives can't read it any more.  Also, checking
-          drive status during operation can prevent some failures (as I
-          [Arno] had to learn the hard way...)
-
-  Notes: First, Bacula could (and even does, to some limited extent)
-          record tape and drive usage.  For tapes, the number of mounts,
-          the amount of data, and the time the tape has actually been
-          running could be recorded.  Data fields for Read and Write
-          time and Number of mounts already exist in the catalog (I'm
-          not sure if VolBytes is the sum of all bytes ever written to
-          that volume by Bacula).  This information can be important
-          when determining which media to replace.  The ability to mark
-          Volumes as "used up" after a given number of write cycles
-          should also be implemented so that a tape is never actually
-          worn out.  For the tape drives known to Bacula, similar
-          information is interesting to determine the device status and
-          expected life time: Time it's been Reading and Writing, number
-          of tape Loads / Unloads / Errors.  This information is not yet
-          recorded as far as I [Arno] know.  A new volume status would
-          be necessary for the new state, like "Used up" or "Worn out".
-          Volumes with this state could be used for restores, but not
-          for writing. These volumes should be migrated first (assuming
-          migration is implemented) and, once they are no longer needed,
-          could be moved to a Trash pool.
-
-          The next step would be to implement a drive cleaning setup.
-          Bacula already has knowledge about cleaning tapes.  Once it
-          has some information about cleaning cycles (measured in drive
-          run time, number of tapes used, or calender days, for example)
-          it can automatically execute tape cleaning (with an
-          autochanger, obviously) or ask for operator assistance loading
-          a cleaning tape.
-
-          The final step would be to implement TAPEALERT checks not only
-          when changing tapes and only sending the information to the
-          administrator, but rather checking after each tape error,
-          checking on a regular basis (for example after each tape
-          file), and also before unloading and after loading a new tape.
-          Then, depending on the drives TAPEALERT state and the known
-          drive cleaning state Bacula could automatically schedule later
-          cleaning, clean immediately, or inform the operator.
-
-          Implementing this would perhaps require another catalog change
-          and perhaps major changes in SD code and the DIR-SD protocol,
-          so I'd only consider this worth implementing if it would
-          actually be used or even needed by many people.
-
-          Implementation of these projects could happen in three distinct
-          sub-projects: Measuring Tape and Drive usage, retiring
-          volumes, and handling drive cleaning and TAPEALERTs.
-
-
-Item  6: Allow FD to initiate a backup
+  What: Make it possible to back up and restore Encrypted Files from and to
+          Windows systems without the need to decrypt it by using the raw
+          encryption functions API (see:
+          http://msdn2.microsoft.com/en-us/library/aa363783.aspx)
+          that is provided for that reason by Microsoft.
+          Whether a file is encrypted can be determined by evaluating
+          the FILE_ATTRIBUTE_ENCRYPTED flag returned by the
+          GetFileAttributes function.
+          For each file backed up or restored by FD on Windows, check if
+          the file is encrypted; if so then use OpenEncryptedFileRaw,
+          ReadEncryptedFileRaw, WriteEncryptedFileRaw,
+          CloseEncryptedFileRaw instead of BackupRead and BackupWrite
+          API calls.
+
+  Why:   Without this interface the fd-daemon running under the
+          system account can't read encrypted Files because the key
+          needed for the decryption is not available to it. As a result,
+          encrypted files are currently not backed up by bacula, and
+          no error is reported for the files that are skipped.
+
+   Notes: Using the xxxEncryptedFileRaw API would make it possible to back up and
+           restore EFS-encrypted files without decrypting their data.
+           Note that such files cannot be restored "portably" (at least,
+           easily) but they would be restorable to a different (or
+           reinstalled) Win32 machine; the restore would require setup
+           of an EFS recovery agent in advance, of course, and this shall
+           be clearly reflected in the documentation, but this is the
+           normal Windows SysAdmin's business.
+           When "portable" backup is requested the EFS-encrypted files
+           shall be clearly reported as errors.
+           See MSDN on the "Backup and Restore of Encrypted Files" topic:
+           http://msdn.microsoft.com/en-us/library/aa363783.aspx
+           Maybe the EFS support requires a new flag in the database for
+           each file, too?
+           Unfortunately, the implementation is not as straightforward as
+           1-to-1 replacement of BackupRead with ReadEncryptedFileRaw,
+           requiring some FD code rewrite to work with
+           encrypted-file-related callback functions.
+
+Item 20: Job migration between different SDs
+Origin:  Mariusz Czulada <manieq AT wp DOT eu>
+Date:    07 May 2007
+Status:  NEW
+
+What:   Allow a migration job to specify devices on a Storage Daemon other than
+        the one used for the migrated jobs (possibly on a different/distant host)
+
+Why:    Sometimes we have more than one system which requires backup
+        implementation.  Often, these systems are functionally unrelated and
+        placed in different locations.  Having a big backup device (a tape
+        library) in each location is not cost-effective.  It would be much
+        better to have one powerful enough tape library which could handle
+        backups from all systems, assuming relatively fast and reliable WAN
+        connections.  In such an architecture, backups are done in service windows
+        on local bacula servers, then migrated to central storage off the peak
+        hours.
+
+Notes:  If migration to a different SD is working, migration to the same SD,
+        as now, could be done the same way (I mean 'localhost') to unify the
+        whole process.
+
+Item 19: Allow FD to initiate a backup
 Origin:  Frank Volf (frank at deze dot org)
 Date:    17 November 2005
 Status: 
@@ -333,9 +660,9 @@ Notes: - The FD already has code for the monitor interface
        8. The console interface to the FD should be extended to
           permit a properly authorized console to initiate a
           backup via the FD.
-              
 
-Item  7: Implement Storage daemon compression
+
+Item 21: Implement Storage daemon compression
   Date:  18 December 2006
   Origin: Vadim A. Umanski , e-mail umanski@ext.ru
   Status:
@@ -356,223 +683,7 @@ Item  7: Implement Storage daemon compression
           That's why the server-side compression feature is needed!
   Notes:
 
-
-Item  8: Reduction of communications bandwidth for a backup
-   Date: 14 October 2008
- Origin: Robin O'Leary (Equiinet)
- Status: 
-
-  What:  Using rdiff techniques, Bacula could significantly reduce
-          the network data transfer volume to do a backup.
-
-  Why:   Faster backup across the Internet
-
-  Notes: This requires retaining certain data on the client during a Full
-          backup that will speed up subsequent backups.
-     
-     
-Item  9: Ability to reconnect a disconnected comm line
-  Date:  26 April 2009
-  Origin: Kern/Eric
-  Status: 
-
-  What:  Often jobs fail because of a communications line drop. In that 
-          case, Bacula should be able to reconnect to the other daemon and
-          resume the job.
-
-  Why:   Avoids backuping data already saved.
-
-  Notes: *Very* complicated from a design point of view because of authenication.
-
-Item 10: Start spooling even when waiting on tape
-  Origin: Tobias Barth <tobias.barth@web-arts.com>
-  Date:  25 April 2008
-  Status:
-
-  What: If a job can be spooled to disk before writing it to tape, it should
-          be spooled immediately.  Currently, bacula waits until the correct
-          tape is inserted into the drive.
-
-  Why:   It could save hours.  When bacula waits on the operator who must insert
-          the correct tape (e.g.  a new tape or a tape from another media
-          pool), bacula could already prepare the spooled data in the spooling
-          directory and immediately start despooling when the tape was
-          inserted by the operator.
-         
-          2nd step: Use 2 or more spooling directories.  When one directory is
-          currently despooling, the next (on different disk drives) could
-          already be spooling the next data.
-
-  Notes: I am using bacula 2.2.8, which has none of those features
-         implemented.
-
-
-Item 11: Include all conf files in specified directory
-Date:  18 October 2008
-Origin: Database, Lda. Maputo, Mozambique
-Contact:Cameron Smith / cameron.ord@database.co.mz 
-Status: New request
-
-What: A directive something like "IncludeConf = /etc/bacula/subconfs" Every
-      time Bacula Director restarts or reloads, it will walk the given
-      directory (non-recursively) and include the contents of any files
-      therein, as though they were appended to bacula-dir.conf
-
-Why: Permits simplified and safer configuration for larger installations with
-      many client PCs.  Currently, through judicious use of JobDefs and
-      similar directives, it is possible to reduce the client-specific part of
-      a configuration to a minimum.  The client-specific directives can be
-      prepared according to a standard template and dropped into a known
-      directory.  However it is still necessary to add a line to the "master"
-      (bacula-dir.conf) referencing each new file.  This exposes the master to
-      unnecessary risk of accidental mistakes and makes automation of adding
-      new client-confs, more difficult (it is easier to automate dropping a
-      file into a dir, than rewriting an existing file).  Ken has previously
-      made a convincing argument for NOT including Bacula's core configuration
-      in an RDBMS, but I believe that the present request is a reasonable
-      extension to the current "flat-file-based" configuration philosophy.
- 
-Notes: There is NO need for any special syntax to these files.  They should
-       contain standard directives which are simply "inlined" to the parent
-       file as already happens when you explicitly reference an external file.
-
-Notes: (kes) this can already be done with scripting
-     From: John Jorgensen <jorgnsn@lcd.uregina.ca>
-     The bacula-dir.conf at our site contains these lines:
-
-   #
-   # Include subfiles associated with configuration of clients.
-   # They define the bulk of the Clients, Jobs, and FileSets.
-   #
-   @|"sh -c 'for f in /etc/bacula/clientdefs/*.conf ; do echo @${f} ; done'"
-
-    and when we get a new client, we just put its configuration into
-    a new file called something like:
-
-    /etc/bacula/clientdefs/clientname.conf
-
-
-Item 12: Multiple threads in file daemon for the same job
-  Date:  27 November 2005
-  Origin: Ove Risberg (Ove.Risberg at octocode dot com)
-  Status:
-
-  What:  I want the file daemon to start multiple threads for a backup
-          job so the fastest possible backup can be made.
-
-          The file daemon could parse the FileSet information and start
-          one thread for each File entry located on a separate
-          filesystem.
-
-          A confiuration option in the job section should be used to
-          enable or disable this feature. The confgutration option could
-          specify the maximum number of threads in the file daemon.
-
-          If the theads could spool the data to separate spool files
-          the restore process will not be much slower.
-
-  Why:   Multiple concurrent backups of a large fileserver with many
-          disks and controllers will be much faster.
-
-  Notes: (KES) This is not necessary and could be accomplished
-         by having two jobs.  In addition, the current VSS code
-         is single thread.
-
-
-Item 13: Possibilty to schedule Jobs on last Friday of the month
-Origin: Carsten Menke <bootsy52 at gmx dot net>
-Date:   02 March 2008
-Status:
-
-   What: Currently if you want to run your monthly Backups on the last
-           Friday of each month this is only possible with workarounds (e.g
-           scripting) (As some months got 4 Fridays and some got 5 Fridays)
-           The same is true if you plan to run your yearly Backups on the
-           last Friday of the year.  It would be nice to have the ability to
-           use the builtin scheduler for this.
-
-   Why:   In many companies the last working day of the week is Friday (or 
-           Saturday), so to get the most data of the month onto the monthly
-           tape, the employees are advised to insert the tape for the
-           monthly backups on the last friday of the month.
-
-   Notes: To give this a complete functionality it would be nice if the
-           "first" and "last" Keywords could be implemented in the
-           scheduler, so it is also possible to run monthy backups at the
-           first friday of the month and many things more.  So if the syntax
-           would expand to this {first|last} {Month|Week|Day|Mo-Fri} of the
-           {Year|Month|Week} you would be able to run really flexible jobs.
-
-           To got a certain Job run on the last Friday of the Month for example
-           one could then write
-
-              Run = pool=Monthly last Fri of the Month at 23:50
-
-              ## Yearly Backup
-
-              Run = pool=Yearly last Fri of the Year at 23:50
-
-              ## Certain Jobs the last Week of a Month
-
-              Run = pool=LastWeek last Week of the Month at 23:50
-
-              ## Monthly Backup on the last day of the month
-
-              Run = pool=Monthly last Day of the Month at 23:50
-
-
-Item 14: Include timestamp of job launch in "stat clients" output
-  Origin: Mark Bergman <mark.bergman@uphs.upenn.edu>
-  Date:  Tue Aug 22 17:13:39 EDT 2006
-  Status:
-
-  What:  The "stat clients" command doesn't include any detail on when
-          the active backup jobs were launched.
-
-  Why:   Including the timestamp would make it much easier to decide whether
-          a job is running properly. 
-
-  Notes: It may be helpful to have the output from "stat clients" formatted 
-          more like that from "stat dir" (and other commands), in a column
-          format. The per-client information that's currently shown (level,
-          client name, JobId, Volume, pool, device, Files, etc.) is good, but
-          somewhat hard to parse (both programmatically and visually), 
-          particularly when there are many active clients.
-
-
-Item 15: Message mailing based on backup types
- Origin: Evan Kaufman <evan.kaufman@gmail.com>
-   Date: January 6, 2006
- Status:
-
-   What: In the "Messages" resource definitions, allowing messages
-          to be mailed based on the type (backup, restore, etc.) and level
-          (full, differential, etc) of job that created the originating
-          message(s).
-
- Why:    It would, for example, allow someone's boss to be emailed
-          automatically only when a Full Backup job runs, so he can
-          retrieve the tapes for offsite storage, even if the IT dept.
-          doesn't (or can't) explicitly notify him.  At the same time, his
-          mailbox wouldnt be filled by notifications of Verifies, Restores,
-          or Incremental/Differential Backups (which would likely be kept
-          onsite).
-
- Notes: One way this could be done is through additional message types, for
- example:
-
-   Messages {
-     # email the boss only on full system backups
-     Mail = boss@mycompany.com = full, !incremental, !differential, !restore, 
-            !verify, !admin
-     # email us only when something breaks
-     MailOnError = itdept@mycompany.com = all
-   }
-
-   Notes: Kern: This should be rather trivial to implement.
-
-
-Item 16: Ability to import/export Bacula database entities
+Item 22: Ability to import/export Bacula database entities
    Date: 26 April 2009
  Origin: Eric
  Status: 
@@ -587,7 +698,7 @@ Item 16: Ability to import/export Bacula database entities
           other criteria.
 
 
-Item 17: Implementation of running Job speed limit.
+Item 23: Implementation of running Job speed limit.
 Origin: Alex F, alexxzell at yahoo dot com
 Date: 29 January 2009
 
@@ -609,7 +720,7 @@ Why: Because of a couple of reasons.  First, it's very hard to implement a
      especially where there is little available.
 
 
-Item 18: Add an override in Schedule for Pools based on backup types
+Item 24: Add an override in Schedule for Pools based on backup types
 Date:    19 Jan 2005
 Origin:  Chad Slater <chad.slater@clickfox.com>
 Status: 
@@ -629,7 +740,7 @@ Status:
           has more capacity (i.e. a 8TB tape library.
 
 
-Item 19: Automatic promotion of backup levels based on backup size
+Item 25: Automatic promotion of backup levels based on backup size
    Date: 19 January 2006
   Origin: Adam Thornton <athornton@sinenomine.net>
   Status: 
@@ -649,7 +760,7 @@ Item 19: Automatic promotion of backup levels based on backup size
           of).
 
 
-Item 20: Allow FileSet inclusion/exclusion by creation/mod times
+Item 26: Allow FileSet inclusion/exclusion by creation/mod times
   Origin: Evan Kaufman <evan.kaufman@gmail.com>
   Date:  January 11, 2006
   Status:
@@ -699,7 +810,7 @@ Item 20: Allow FileSet inclusion/exclusion by creation/mod times
            or 'since'.
 
 
-Item 21: Archival (removal) of User Files to Tape
+Item 27: Archival (removal) of User Files to Tape
   Date:  Nov. 24/2005 
   Origin: Ray Pengelly [ray at biomed dot queensu dot ca
   Status: 
@@ -726,23 +837,47 @@ Item 21: Archival (removal) of User Files to Tape
           access time.  Then after another 6 months (or possibly as one
           storage pool gets full) data is migrated to Tape.
 
+Item 28: Ability to reconnect a disconnected comm line
+  Date:  26 April 2009
+  Origin: Kern/Eric
+  Status: 
+
+  What:  Often jobs fail because of a communications line drop. In that 
+          case, Bacula should be able to reconnect to the other daemon and
+          resume the job.
+
+  Why:   Avoids backing up data already saved.
+
+  Notes: *Very* complicated from a design point of view because of authentication.
+
+Item 29: Multiple threads in file daemon for the same job
+  Date:  27 November 2005
+  Origin: Ove Risberg (Ove.Risberg at octocode dot com)
+  Status:
+
+  What:  I want the file daemon to start multiple threads for a backup
+          job so the fastest possible backup can be made.
 
-Item 22: An option to operate on all pools with update vol parameters
-  Origin: Dmitriy Pinchukov <absh@bossdev.kiev.ua>
-   Date: 16 August 2006
-  Status: Patch made by  Nigel Stepp
+          The file daemon could parse the FileSet information and start
+          one thread for each File entry located on a separate
+          filesystem.
 
-   What: When I do update -> Volume parameters -> All Volumes
-          from Pool, then I have to select pools one by one.  I'd like
-          console to have an option like "0: All Pools" in the list of
-          defined pools.
+          A configuration option in the job section should be used to
+          enable or disable this feature. The configuration option could
+          specify the maximum number of threads in the file daemon.
 
-   Why:  I have many pools and therefore unhappy with manually
-          updating each of them using update -> Volume parameters -> All
-          Volumes from Pool -> pool #.
+          If the threads could spool the data to separate spool files,
+          the restore process would not be much slower.
+
+  Why:   Multiple concurrent backups of a large fileserver with many
+          disks and controllers will be much faster.
+
+  Notes: (KES) This is not necessary and could be accomplished
+         by having two jobs.  In addition, the current VSS code
+         is single-threaded.
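+
+          As a rough illustration of the original proposal (a sketch only,
+          with hypothetical names; this is not Bacula code), the FD could
+          start one worker thread per include entry and wait for all of
+          them to finish:
+
+            /* sketch: one backup worker per FileSet include entry */
+            #include <pthread.h>
+            #include <stdio.h>
+
+            static void *backup_worker(void *arg)
+            {
+               const char *path = (const char *)arg;
+               /* walk 'path' and ship its files to the SD here */
+               printf("backing up %s\n", path);
+               return NULL;
+            }
+
+            int main(void)
+            {
+               /* one entry per filesystem, as the item suggests */
+               char *entries[] = { "/home", "/var", "/usr" };
+               enum { NENTRIES = 3 };
+               pthread_t tid[NENTRIES];
+               for (int i = 0; i < NENTRIES; i++) {
+                  pthread_create(&tid[i], NULL, backup_worker, entries[i]);
+               }
+               for (int i = 0; i < NENTRIES; i++) {
+                  pthread_join(tid[i], NULL);  /* job ends when all workers do */
+               }
+               return 0;
+            }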
 
 
-Item 23: Automatic disabling of devices
+Item 30: Automatic disabling of devices
    Date: 2005-11-11
   Origin: Peter Eriksson <peter at ifm.liu dot se>
   Status:
@@ -769,47 +904,7 @@ Item 23: Automatic disabling of devices
           instead.
 
 
-Item 24: Ability to defer Batch Insert to a later time
-   Date: 26 April 2009
- Origin: Eric
- Status: 
-
-  What:  Instead of doing a Job Batch Insert at the end of the Job
-          which might create resource contention with lots of Job,
-          defer the insert to a later time.
-
-  Why:   Permits to focus on getting the data on the Volume and
-          putting the metadata into the Catalog outside the backup
-          window.
-
-  Notes: Will use the proposed Bacula ASCII database import/export
-          format (i.e. dependent on the import/export entities project).
-
-
-Item 25: Add MaxVolumeSize/MaxVolumeBytes to Storage resource
-   Origin: Bastian Friedrich <bastian.friedrich@collax.com>
-   Date:  2008-07-09
-   Status: -
-
-   What:  SD has a "Maximum Volume Size" statement, which is deprecated and
-           superseded by the Pool resource statement "Maximum Volume Bytes".
-           It would be good if either statement could be used in Storage
-           resources.
-
-   Why:   Pools do not have to be restricted to a single storage type/device;
-           thus, it may be impossible to define Maximum Volume Bytes in the
-           Pool resource.  The old MaxVolSize statement is deprecated, as it
-           is SD side only.  I am using the same pool for different devices.
-
-   Notes: State of idea currently unknown.  Storage resources in the dir
-           config currently translate to very slim catalog entries; these
-           entries would require extensions to implement what is described
-           here.  Quite possibly, numerous other statements that are currently
-           available in Pool resources could be used in Storage resources too
-           quite well.
-
-
-Item 26: Enable persistent naming/number of SQL queries
+Item 31: Enable persistent naming/number of SQL queries
   Date:  24 Jan, 2007 
   Origin: Mark Bergman 
   Status: 
@@ -875,7 +970,7 @@ Item 26: Enable persistent naming/number of SQL queries
         than by number.
 
 
-Item 27: Bacula Dir, FD and SD to support proxies
+Item 32: Bacula Dir, FD and SD to support proxies
 Origin: Karl Grindley @ MIT Lincoln Laboratory <kgrindley at ll dot mit dot edu>
 Date:  25 March 2009
 Status: proposed
@@ -916,7 +1011,7 @@ Notes: Director resource tunneling: This configuration option to utilize a
         One could also possibly use stunnel, netcat, etc.
 
 
-Item 28: Add Minumum Spool Size directive
+Item 33: Add Minimum Spool Size directive
 Date: 20 March 2008
 Origin: Frank Sweetser <fs@wpi.edu>
 
@@ -939,114 +1034,10 @@ Origin: Frank Sweetser <fs@wpi.edu>
         gigabytes) it can easily produce multi-megabyte report emails!
 
 
-Item 29: Handle Windows Encrypted Files using Win raw encryption
-  Origin: Michael Mohr, SAG  Mohr.External@infineon.com
-  Date:  22 February 2008
-  Origin: Alex Ehrlich (Alex.Ehrlich-at-mail.ee)
-  Date:  05 August 2008
-  Status:
-
-  What: Make it possible to backup and restore Encypted Files from and to
-          Windows systems without the need to decrypt it by using the raw
-          encryption functions API (see:
-          http://msdn2.microsoft.com/en-us/library/aa363783.aspx)
-          that is provided for that reason by Microsoft.
-          If a file ist encrypted could be examined by evaluating the 
-          FILE_ATTRIBUTE_ENCRYTED flag of the GetFileAttributes
-          function.
-          For each file backed up or restored by FD on Windows, check if
-          the file is encrypted; if so then use OpenEncryptedFileRaw,
-          ReadEncryptedFileRaw, WriteEncryptedFileRaw,
-          CloseEncryptedFileRaw instead of BackupRead and BackupWrite
-          API calls.
-
-  Why:   Without the usage of this interface the fd-daemon running
-          under the system account can't read encypted Files because
-          the key needed for the decrytion is missed by them. As a result 
-          actually encrypted files are not backed up
-          by bacula and also no error is shown while missing these files.
-
-   Notes: Using xxxEncryptedFileRaw API would allow to backup and
-           restore EFS-encrypted files without decrypting their data.
-           Note that such files cannot be restored "portably" (at least,
-           easily) but they would be restoreable to a different (or
-           reinstalled) Win32 machine; the restore would require setup
-           of a EFS recovery agent in advance, of course, and this shall
-           be clearly reflected in the documentation, but this is the
-           normal Windows SysAdmin's business.
-           When "portable" backup is requested the EFS-encrypted files
-           shall be clearly reported as errors.
-           See MSDN on the "Backup and Restore of Encrypted Files" topic:
-           http://msdn.microsoft.com/en-us/library/aa363783.aspx
-           Maybe the EFS support requires a new flag in the database for
-           each file, too?
-           Unfortunately, the implementation is not as straightforward as
-           1-to-1 replacement of BackupRead with ReadEncryptedFileRaw,
-           requiring some FD code rewrite to work with
-           encrypted-file-related callback functions.
-
-
-Item 30: Implement a Storage device like Amazon's S3.
-  Date:  25 August 2008
-  Origin: Soren Hansen <soren@ubuntu.com>
-  Status: Not started.
-  What:  Enable the storage daemon to store backup data on Amazon's
-          S3 service.
-
-  Why:   Amazon's S3 is a cheap way to store data off-site. 
-
-  Notes: If we configure the Pool to put only one job per volume (they don't
-         support append operation), and the volume size isn't to big (100MB?),
-         it should be easy to adapt the disk-changer script to add get/put
-         procedure with curl. So, the data would be safetly copied during the
-         Job. 
-
-         Cloud should be only used with Copy jobs, users should always have
-         a copy of their data on their site.
-
-         We should also think to have our own cache, trying always to have
-         cloud volume on the local disk. (I don't know if users want to store
-         100GB on cloud, so it shouldn't be a disk size problem). For example,
-         if bacula want to recycle a volume, it will start by downloading the
-         file to truncate it few seconds later, if we can avoid that...
-
-Item 31: Convert tray monitor on Windows to a stand alone program
-   Date: 26 April 2009
- Origin: Kern/Eric
- Status: 
-
-  What:  Separate Win32 tray monitor to be a separate program.
-
-  Why:   Vista does not allow SYSTEM services to interact with the 
-          desktop, so the current tray monitor does not work on Vista
-          machines.  
 
-  Notes: Requires communicating with the FD via the network (simulate
-          a console connection).
 
 
-Item 32: Relabel disk volume after recycling
-  Origin: Pasi Kärkkäinen <pasik@iki.fi>
-  Date:   07 May 2009.
-  Status: Not implemented yet, no code written.
-
-  What: The ability to relabel the disk volume (and thus rename the file on the
-        disk) after it has been recycled. Useful when you have a single job
-        per disk volume, and you use a custom Label format, for example:
-        Label Format =
-        "${Client}-${Level}-${NumVols:p/4/0/r}-${Year}_${Month}_${Day}-${Hour}_${Minute}"
-
-  Why: Disk volumes in Bacula get the label/filename when they are used for the
-       first time.  If you use recycling and custom label format like above,
-       the disk volume name doesn't match the contents after it has been
-       recycled.  This feature makes it possible to keep the label/filename
-       in sync with the content and thus makes it easy to check/monitor the
-       backups from the shell and/or normal file management tools, because
-       the filenames of the disk volumes match the content.
-
-  Notes:  The configuration option could be "Relabel after Recycling = Yes".
-
-Item 33: Command that releases all drives in an autochanger
+Item 34: Command that releases all drives in an autochanger
   Origin: Blake Dunlap (blake@nxs.net)
   Date:   10/07/2009
   Status: Request
@@ -1069,7 +1060,7 @@ Item 33: Command that releases all drives in an autochanger
        configuration quicker/easier, as all drives need to be released
        before any modifications to slots.
 
-Item 34: Run bscan on a remote storage daemon from within bconsole.
+Item 35: Run bscan on a remote storage daemon from within bconsole.
   Date:  07 October 2009
   Origin: Graham Keeling <graham@equiinet.com>
   Status: Proposing
@@ -1106,7 +1097,7 @@ Item 34: Run bscan on a remote storage daemon from within bconsole.
          code is used in both the bscan program and the Storage daemon to avoid
          adding a lot of new code that must be maintained by the project.
 
-Item 35: Implement a Migration job type that will create a reverse
+Item 36: Implement a Migration job type that will create a reverse
           incremental (or decremental) backup from two existing full backups.
   Date:   05 October 2009
   Origin: Griffith College Dublin.  Some sponsorship available.
@@ -1130,82 +1121,9 @@ Item 35: Implement a Migration job type that will create a reverse
   Notes:  This feature was previously discussed on the bacula-devel list
           here: http://www.mail-archive.com/bacula-devel@lists.sourceforge.net/msg04962.html
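+
+          As a rough sketch of the idea (hypothetical types, not Bacula
+          code): the reverse increment is the set of entries from the
+          older Full that are missing from, or differ in, the newer Full.
+
+            /* sketch: select entries for a decremental (reverse) backup */
+            #include <string.h>
+
+            struct entry { const char *path; const char *digest; };
+
+            /* return 1 if 'e' (from the older Full) belongs in the
+             * reverse increment, 0 if the newer Full already has it */
+            static int in_decremental(const struct entry *e,
+                                      const struct entry *newer, int n)
+            {
+               for (int i = 0; i < n; i++) {
+                  if (strcmp(e->path, newer[i].path) == 0) {
+                     /* present in both: keep only if contents differ */
+                     return strcmp(e->digest, newer[i].digest) != 0;
+                  }
+               }
+               return 1;   /* deleted since the older Full: must keep it */
+            }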
 
-Item 36: Job migration between different SDs
-Origin:  Mariusz Czulada <manieq AT wp DOT eu>
-Date:    07 May 2007
-Status:  NEW
 
-What:   Allow to specify in migration job devices on Storage Daemon other then
-        the one used for migrated jobs (possibly on different/distant host)
 
-Why:    Sometimes we have more then one system which requires backup
-        implementation.  Often, these systems are functionally unrelated and
-        placed in different locations.  Having a big backup device (a tape
-        library) in each location is not cost-effective.  It would be much
-        better to have one powerful enough tape library which could handle
-        backups from all systems, assuming relatively fast and reliable WAN
-        connections.  In such architecture backups are done in service windows
-        on local bacula servers, then migrated to central storage off the peak
-        hours.
-
-Notes:  If migration to different SD is working, migration to the same SD, as
-        now, could be done the same way (i mean 'localhost') to unify the
-        whole process
-
-Item 37: Concurrent spooling and despooling withini a single job.
-Date:  17 nov 2009
-Origin: Jesper Krogh <jesper@krogh.cc>
-Status: NEW
-What:  When a job has spooling enabled and the spool area size is
-       less than the total volumes size the storage daemon will:
-       1) Spool to spool-area
-       2) Despool to tape
-       3) Go to 1 if more data to be backed up.
-
-       Typical disks will serve data with a speed of 100MB/s when
-       dealing with large files, network it typical capable of doing 115MB/s
-       (GbitE). Tape drives will despool with 50-90MB/s (LTO3) 70-120MB/s
-       (LTO4) depending on compression and data.
-
-       As bacula currently works it'll hold back data from the client until
-       de-spooling is done, now matter if the spool area can handle another
-       block of data. Say given a FileSet of 4TB and a spool-area of 100GB and
-       a Maximum Job Spool Size set to 50GB then above sequence could be
-       changed to allow to spool to the other 50GB while despooling the first
-       50GB and not holding back the client while doing it. As above numbers
-       show, depending on tape-drive and disk-arrays this potentially leads to
-       a cut of the backup-time of 50% for the individual jobs.
-
-       Real-world example, backing up 112.6GB (large files) to LTO4 tapes
-       (despools with ~75MB/s, data is gzipped on the remote filesystem.
-       Maximum Job Spool Size = 8GB
-
-       Current:
-       Size: 112.6GB
-       Elapsed time (total time): 46m 15s => 2775s
-       Despooling time: 25m 41s => 1541s (55%)
-       Spooling time: 20m 34s => 1234s (45%)
-       Reported speed: 40.58MB/s
-       Spooling speed: 112.6GB/1234s => 91.25MB/s
-       Despooling speed: 112.6GB/1541s => 73.07MB/s
-
-       So disk + net can "keep up" with the LTO4 drive (in this test)
-
-       Prosed change would effectively make the backup run in the "despooling
-       time" 1541s giving a reduction to 55% of the total run time.
-
-       In the situation where the individual job cannot keep up with LTO-drive
-       spooling enables efficient multiplexing of multiple concurrent jobs onto
-       the same drive.
-
-Why:   When dealing with larger volumes the general utillization of the
-       network/disk is important to maximize in order to be able to run a full
-       backup over a weekend. Current work-around is to split the FileSet in
-       smaller FileSet and Jobs but that leads to more configuration mangement
-       and is harder to review for completeness. Subsequently it makes restores
-       more complex.
-
-Item 39: Extend the verify code to make it possible to verify
+Item 37: Extend the verify code to make it possible to verify
           older jobs, not only the last one that has finished
   Date:   10 April 2009
   Origin: Ralf Gross (Ralf-Lists <at> ralfgross.de)
@@ -1262,7 +1180,7 @@ Item 39: Extend the verify code to make it possible to verify
 
 
 
-Item 40: Separate "Storage" and "Device" in the bacula-dir.conf
+Item 38: Separate "Storage" and "Device" in the bacula-dir.conf
   Date:   29 April 2009
   Origin: "James Harper" <james.harper@bendigoit.com.au>
   Status: not implemented or documented
@@ -1299,7 +1217,7 @@ Item 40: Separate "Storage" and "Device" in the bacula-dir.conf
 
   Notes:  
 
-Item 41: Least recently used device selection for tape drives in autochanger.
+Item 39: Least recently used device selection for tape drives in autochanger.
 Date:    12 October 2009
 Origin:  Thomas Carter <tcarter@memc.com>
 Status:  Proposal
@@ -1318,9 +1236,88 @@ Why:  The current implementation places a majority of use and wear on drive
 
 Notes:
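+       A minimal sketch of the selection rule (hypothetical types and
+       names, not the SD's real reservation code): among the idle drives,
+       pick the one with the oldest last-use timestamp.
+
+          /* sketch: least-recently-used drive selection */
+          #include <stddef.h>
+          #include <time.h>
+
+          struct drive { const char *name; time_t last_used; int busy; };
+
+          struct drive *pick_lru(struct drive *d, int ndrives)
+          {
+             struct drive *best = NULL;
+             for (int i = 0; i < ndrives; i++) {
+                if (d[i].busy) continue;          /* skip drives in use */
+                if (!best || d[i].last_used < best->last_used) {
+                   best = &d[i];                  /* older timestamp wins */
+                }
+             }
+             return best;                         /* NULL if every drive is busy */
+          }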
 
+Item 40: Implement a Storage device like Amazon's S3.
+  Date:  25 August 2008
+  Origin: Soren Hansen <soren@ubuntu.com>
+  Status: Not started.
+  What:  Enable the storage daemon to store backup data on Amazon's
+          S3 service.
+
+  Why:   Amazon's S3 is a cheap way to store data off-site. 
+
+  Notes: If we configure the Pool to put only one job per volume (S3 doesn't
+         support an append operation), and the volume size isn't too big
+         (100MB?), it should be easy to adapt the disk-changer script to add
+         get/put procedures using curl. That way, the data would be safely
+         copied during the Job.
+
+         Cloud storage should only be used with Copy jobs; users should
+         always keep a copy of their data on site.
+
+         We should also think about having our own cache, always trying to
+         keep the cloud volume on the local disk. (I don't know if users want
+         to store 100GB in the cloud, so disk size shouldn't be a problem.)
+         For example, if Bacula wants to recycle a volume, it would otherwise
+         start by downloading the file only to truncate it a few seconds
+         later; we should avoid that if we can.
+
+Item 41: Convert tray monitor on Windows to a stand alone program
+   Date: 26 April 2009
+ Origin: Kern/Eric
+ Status: 
+
+  What:  Make the Win32 tray monitor a stand-alone program.
+
+  Why:   Vista does not allow SYSTEM services to interact with the 
+          desktop, so the current tray monitor does not work on Vista
+          machines.  
+
+  Notes: Requires communicating with the FD via the network (simulate
+          a console connection).
+
+Item 42: Improve Bacula's tape and drive usage and cleaning management 
+  Date:  8 November 2005, November 11, 2005
+  Origin: Adam Thornton <athornton at sinenomine dot net>,
+          Arno Lehmann <al at its-lehmann dot de>
+  Status:
+
+  What:  
+          1. Measure tape and drive usage (mostly implemented)
+          2. Retire a volume when it is too old or has too many errors
+          3. Handle cleaning and tape alerts.
+
+  Why:   Needed to keep worn-out media and dirty drives from causing
+          job failures.
+
+
+Item 43: Relabel disk volume after recycling
+  Origin: Pasi Kärkkäinen <pasik@iki.fi>
+  Date:   07 May 2009.
+  Status: Not implemented yet, no code written.
+
+  What: The ability to relabel the disk volume (and thus rename the file on the
+        disk) after it has been recycled. Useful when you have a single job
+        per disk volume, and you use a custom Label format, for example:
+        Label Format =
+        "${Client}-${Level}-${NumVols:p/4/0/r}-${Year}_${Month}_${Day}-${Hour}_${Minute}"
+
+  Why: Disk volumes in Bacula get the label/filename when they are used for the
+       first time.  If you use recycling and custom label format like above,
+       the disk volume name doesn't match the contents after it has been
+       recycled.  This feature makes it possible to keep the label/filename
+       in sync with the content and thus makes it easy to check/monitor the
+       backups from the shell and/or normal file management tools, because
+       the filenames of the disk volumes match the content.
+
+  Notes:  The configuration option could be "Relabel after Recycling = Yes".
+
+
+
 ========= New items after last vote ====================
 
 
+Note: to renumber items, use:
+  scripts/renumber_projects.pl projects >1
+
+
 ========= Add new items above this line =================
 
 
@@ -1342,12 +1339,13 @@ Item  n: One line summary ...
 
 
 ========== Items completed in version 5.0.0 ====================
-*Item  2: 'restore' menu: enter a JobId, automatically select dependents
-*Item  5: Deletion of disk Volumes when pruned (partial -- truncate when pruned)
-*Item  6: Implement Base jobs
-*Item 10: Restore from volumes on multiple storage daemons
-*Item 15: Enable/disable compression depending on storage device (disk/tape)
-*Item 20: Cause daemons to use a specific IP address to source communications
-*Item 23: "Maximum Concurrent Jobs" for drives when used with changer device
-*Item 31: List InChanger flag when doing restore.
-*Item 35: Port bat to Win32
+*Item   : 'restore' menu: enter a JobId, automatically select dependents
+*Item   : Deletion of disk Volumes when pruned (partial -- truncate when pruned)
+*Item   : Implement Base jobs
+*Item   : Restore from volumes on multiple storage daemons
+*Item   : Enable/disable compression depending on storage device (disk/tape)
+*Item   : Cause daemons to use a specific IP address to source communications
+*Item   : "Maximum Concurrent Jobs" for drives when used with changer device
+*Item   : List InChanger flag when doing restore.
+*Item   : Port bat to Win32
+*Item   : An option to operate on all pools with update vol parameters
diff --git a/bacula/src/cats/make_mysql_tables.in b/bacula/src/cats/make_mysql_tables.in
index 37a10d2..e50e41f 100644
--- a/bacula/src/cats/make_mysql_tables.in
+++ b/bacula/src/cats/make_mysql_tables.in
@@ -224,7 +224,7 @@ CREATE TABLE Media (
     'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning') NOT NULL,
    Enabled TINYINT DEFAULT 1,
    Recycle TINYINT DEFAULT 0,
-   ActionOnPurge     TINYINT    DEFAULT 0,
+   ActionOnPurge     TINYINT	DEFAULT 0,
    VolRetention BIGINT UNSIGNED DEFAULT 0,
    VolUseDuration BIGINT UNSIGNED DEFAULT 0,
    MaxVolJobs INTEGER UNSIGNED DEFAULT 0,
@@ -264,7 +264,7 @@ CREATE TABLE Pool (
    MaxVolBytes BIGINT UNSIGNED DEFAULT 0,
    AutoPrune TINYINT DEFAULT 0,
    Recycle TINYINT DEFAULT 0,
-   ActionOnPurge     TINYINT    DEFAULT 0,
+   ActionOnPurge     TINYINT	DEFAULT 0,
    PoolType ENUM('Backup', 'Copy', 'Cloned', 'Archive', 'Migration', 'Scratch') NOT NULL,
    LabelType TINYINT DEFAULT 0,
    LabelFormat TINYBLOB,
@@ -324,8 +324,8 @@ CREATE TABLE UnsavedFiles (
 
 CREATE TABLE Counters (
    Counter TINYBLOB NOT NULL,
-   MinValue INTEGER DEFAULT 0,
-   MaxValue INTEGER DEFAULT 0,
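+   -- MinValue/MaxValue must be quoted: MAXVALUE became a reserved word in MySQL 5.5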
+   \`MinValue\` INTEGER DEFAULT 0,
+   \`MaxValue\` INTEGER DEFAULT 0,
    CurrentValue INTEGER DEFAULT 0,
    WrapCounter TINYBLOB NOT NULL,
    PRIMARY KEY (Counter(128))
@@ -375,7 +375,7 @@ CREATE TABLE PathHierarchy
 );
 
 CREATE INDEX pathhierarchy_ppathid 
-          ON PathHierarchy (PPathId);
+	  ON PathHierarchy (PPathId);
 
 CREATE TABLE PathVisibility
 (
@@ -386,7 +386,7 @@ CREATE TABLE PathVisibility
       CONSTRAINT pathvisibility_pkey PRIMARY KEY (JobId, PathId)
 );
 CREATE INDEX pathvisibility_jobid
-             ON PathVisibility (JobId);
+	     ON PathVisibility (JobId);
 
 CREATE TABLE Version (
    VersionId INTEGER UNSIGNED NOT NULL 
diff --git a/bacula/src/cats/sql_create.c b/bacula/src/cats/sql_create.c
index e3d4f74..ad84e67 100644
--- a/bacula/src/cats/sql_create.c
+++ b/bacula/src/cats/sql_create.c
@@ -627,7 +627,7 @@ int db_create_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
    }
 
    /* Must create it */
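+   /* Quote "MinValue"/"MaxValue": MAXVALUE became a reserved word in MySQL 5.5 */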
-   Mmsg(mdb->cmd, "INSERT INTO Counters (Counter,MinValue,MaxValue,CurrentValue,"
+   Mmsg(mdb->cmd, "INSERT INTO Counters (Counter,\"MinValue\",\"MaxValue\",CurrentValue,"
       "WrapCounter) VALUES ('%s','%d','%d','%d','%s')",
       cr->Counter, cr->MinValue, cr->MaxValue, cr->CurrentValue,
       cr->WrapCounter);
diff --git a/bacula/src/cats/sql_get.c b/bacula/src/cats/sql_get.c
index 13f053b..e9d3bcc 100644
--- a/bacula/src/cats/sql_get.c
+++ b/bacula/src/cats/sql_get.c
@@ -730,7 +730,7 @@ int db_get_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
    SQL_ROW row;
 
    db_lock(mdb);
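+   /* "MinValue"/"MaxValue" are quoted: MAXVALUE is reserved in MySQL 5.5+ */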
-   Mmsg(mdb->cmd, "SELECT MinValue,MaxValue,CurrentValue,WrapCounter "
+   Mmsg(mdb->cmd, "SELECT \"MinValue\",\"MaxValue\",CurrentValue,WrapCounter "
       "FROM Counters WHERE Counter='%s'", cr->Counter);
 
    if (QUERY_DB(jcr, mdb, mdb->cmd)) {
diff --git a/bacula/src/cats/sql_update.c b/bacula/src/cats/sql_update.c
index 30f60fd..5ac95e5 100644
--- a/bacula/src/cats/sql_update.c
+++ b/bacula/src/cats/sql_update.c
@@ -243,7 +243,7 @@ int db_update_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
    db_lock(mdb);
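+   /* "MinValue"/"MaxValue" are quoted for MySQL 5.5 reserved-word compatibility */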
 
    Mmsg(mdb->cmd,
-"UPDATE Counters SET MinValue=%d,MaxValue=%d,CurrentValue=%d,"
+"UPDATE Counters SET \"MinValue\"=%d,\"MaxValue\"=%d,CurrentValue=%d,"
 "WrapCounter='%s' WHERE Counter='%s'",
       cr->MinValue, cr->MaxValue, cr->CurrentValue,
       cr->WrapCounter, cr->Counter);
diff --git a/bacula/src/version.h b/bacula/src/version.h
index c1f0c9d..cfde162 100644
--- a/bacula/src/version.h
+++ b/bacula/src/version.h
@@ -1,8 +1,8 @@
 
 #undef  VERSION
 #define VERSION "5.0.3"
-#define BDATE   "04 August 2010"
-#define LSMDATE "04Aug10"
+#define BDATE   "30 August 2010"
+#define LSMDATE "30Aug10"
 
 #define PROG_COPYRIGHT "Copyright (C) %d-2010 Free Software Foundation Europe e.V.\n"
 #define BYEAR "2010"       /* year for copyright messages in progs */
@@ -52,7 +52,7 @@
 #define TRACE_FILE 1
 
 /* If this is set stdout will not be closed on startup */
-/* #define DEVELOPER 1 */
+#define DEVELOPER 1
 
 /*
  * SMCHECK does orphaned buffer checking (memory leaks)