diff -u AmazonS3StreamWrapper.inc AmazonS3StreamWrapper.inc
--- AmazonS3StreamWrapper.inc	2013-07-23 16:16:41.000000000 -0700
+++ AmazonS3StreamWrapper.inc	2013-09-11 16:27:12.000000000 -0700
@@ -15,7 +15,7 @@
   protected $uri;
   
   /**
-   * @var AmazonS3 S3 client object, shared across all instances of 
+   * @var AmazonS3 S3 client object, shared across all instances of
    * AmazonS3StreamWrapper.
    */
   protected static $s3_client = null;
@@ -61,7 +61,7 @@
   protected $buffer_length = 0;
   
   /**
-   * Records the number of calls to stream_write() between each call to 
+   * Records the number of calls to stream_write() between each call to
    * stream_flush(), for testing purposes.
    */
   protected $sw_call_count = 0;
@@ -105,7 +105,7 @@
    * Uses Drupal's mimetype mapping, unless a different mapping is specified.
    *
    * @return
-   *   Returns a string representing the file's MIME type, or 
+   *   Returns a string representing the file's MIME type, or
    *   'application/octet-stream' if no type cna be determined.
    */
   public static function getMimeType($uri, $mapping=NULL) {
@@ -153,20 +153,20 @@
     $this->bucket = $bucket = variable_get('amazons3_bucket', '');
     
     // If it hasn't already been done in this request, load the AWSSDK library
-    // and create the AmazonS3 client. 
+    // and create the AmazonS3 client.
     if (!isset(self::$s3_client)) {
       if (!libraries_load('awssdk')) {
         throw new Exception(t('Unable to load the AWS SDK. Please check you have installed the library correctly and configured your S3 credentials.'));
       }
       else if (empty($this->bucket)) {
-        throw new Exception(t('AmazonS3 bucket name not configured. Please visit the !config_page.', 
-          array('!config_page' => l('configuration page', '/admin/config/media/amazons3'))));
+        throw new Exception(t('AmazonS3 bucket name not configured. Please visit the !config_page.',
+          array('!config_page' => l(t('configuration page'), 'admin/config/media/amazons3'))));
       }
       else {
         try {
          self::$s3_client = new AmazonS3();
          // Using SSL slows down uploads significantly, but it's unsafe to disable it.
-         // I'm still looking for a better solution. -- coredumperror 2013/07/12 
+         // I'm still looking for a better solution. -- coredumperror 2013/07/12
          //self::$s3_client->disable_ssl();
          $this->_test_log('Created AmazonS3 client.');
         }
@@ -264,7 +264,7 @@
     
     // Image styles support:
     // If an image derivative URL (e.g. styles/thumbnail/blah.jpg) is requested
-    // and the file doesn't exist, return a private URL instead. Drupal will 
+    // and the file doesn't exist, return a system URL instead. Drupal will
     // create the derivative when that URL gets requested.
     $path_parts = explode('/', $s3_filename);
     if ($path_parts[0] == 'styles') {
@@ -341,9 +341,8 @@
    *   Returns TRUE.
    */
   public function chmod($mode) {
+    $this->_assert_constructor_called();
     $this->_test_log("chmod($mode) called.");
-    // This line will be important if this function ever gets implemented.
-    //$this->_assert_constructor_called();
     return TRUE;
   }
   
@@ -382,7 +381,7 @@
     $target = $this->getTarget($uri);
     $dirname = dirname($target);
     
-    // Special case for calls to dirname('s3://'), ensuring that recursive 
+    // Special case for calls to dirname('s3://'), ensuring that recursive
     // calls eventually bottom out.
     if ($dirname == '.') {
       $dirname = '';
@@ -653,7 +652,7 @@
    * @return
    *   TRUE if resource was successfully deleted, regardless of whether or not
    *   the file actually existed.
-   *   FALSE if the call to S3 failed, in which case the file will not be 
+   *   FALSE if the call to S3 failed, in which case the file will not be
    *   removed from the cache.
    *
    * @see http://php.net/manual/en/streamwrapper.unlink.php
@@ -754,7 +753,7 @@
     $this->_test_log("mkdir($uri, $mode, $options) called.");
     
     // If this URI already exists in the cache, return TRUE if it's a folder
-    // (so that recursive calls won't improperly report failure when they 
+    // (so that recursive calls won't improperly report failure when they
     // reach an existing ancestor), or FALSE if it's a file (failure).
     $test_metadata = $this->_amazons3_read_cache($uri);
     if ($test_metadata) {
@@ -762,17 +761,17 @@
     }
     
     // S3 is a flat file system, with no concept of directories (just files
-    // with slashes in their names). To represent folders, we store them in the 
-    // metadata cache, without creating anything in S3. 
+    // with slashes in their names). To represent folders, we store them in the
+    // metadata cache, without creating anything in S3.
     $metadata = _amazons3_format_metadata($uri, array());
     $metadata['timestamp'] = date('U', time());
     $this->_amazons3_write_cache($metadata);
     
-    // If the STREAM_MKDIR_RECURSIVE option was specified, also create all the 
+    // If the STREAM_MKDIR_RECURSIVE option was specified, also create all the
     // ancestor folders of this uri.
     $parent_dir = drupal_dirname($uri);
     if (($options & STREAM_MKDIR_RECURSIVE) && $parent_dir != 's3://') {
-      return $this->mkdir($parent_dir, $mode, $options); 
+      return $this->mkdir($parent_dir, $mode, $options);
     }
     return TRUE;
   }
@@ -786,7 +785,7 @@
    *   A bit mask of STREAM_REPORT_ERRORS.
    *
    * @return
-   *   TRUE if directory was successfully removed. 
+   *   TRUE if directory was successfully removed.
    *   FALSE if the directory was not empty.
    *
    * @see http://php.net/manual/en/streamwrapper.rmdir.php
@@ -866,11 +865,10 @@
     
     // Get the list of uris for files and folders which are in the specified
     // folder, but not in any of its subfolders.
-    $query = db_query("SELECT uri FROM {amazons3_file} WHERE uri LIKE :folder AND uri NOT LIKE :subfolder", 
+    $query = db_query("SELECT uri FROM {amazons3_file} WHERE uri LIKE :folder AND uri NOT LIKE :subfolder",
         array(':folder' => "$uri%", ':subfolder' => "$uri%/%"));
     
-    // Create $this->dir as an empty array, even if there aren't any matching 
-    // files, since the folder might be empty.
+    // Create $this->dir as an empty array, since the folder might be empty.
     $this->dir = array();
     foreach ($query->fetchAll(PDO::FETCH_COLUMN, 0) as $uri) {
       $this->dir[] = basename($uri);
@@ -957,7 +955,7 @@
   }
   
   /**
-   * Get the status of the file with the specified URI. 
+   * Get the status of the file with the specified URI.
    *
    * @return
    *   An array with file status, or FALSE if the file doesn't exist.
@@ -1022,7 +1020,7 @@
   /**
    * Try to fetch an object from the metadata cache. If that file isn't in the
    * cache, it is considered to be nonexistant.
-   * 
+   *
    * @param uri
    *   A string containing the uri of the resource to check.
    *
@@ -1030,6 +1028,7 @@
    *    An array if the $uri exists, otherwise FALSE.
    */
   protected function _amazons3_get_object($uri) {
+    // Since this is an internal function, don't log it by default.
     //$this->_test_log("_amazons3_get_object($uri) called.");
     // For the root directory, just return metadata for a generic folder.
     if ($uri == 's3://' || $uri == 's3:') {
@@ -1045,7 +1044,7 @@
     
     // If cache ignore is enabled, query S3 for all file requests.
     if (variable_get('amazons3_ignore_cache', FALSE)) {
-      // Even when ignoring the cache, we still read folders from it, because 
+      // Even when ignoring the cache, we still read folders from it, because
       // they aren't stored in S3.
       if (!empty($metadata['dir'])) {
         return $metadata;
@@ -1072,6 +1071,7 @@
    *   An array of metadata if the $uri is in the cache, otherwise FALSE.
    */
   protected function _amazons3_read_cache($uri) {
+    // Since this is an internal function, don't log it by default.
     //$this->_test_log("_amazons3_read_cache($uri) called.");
     $record = db_query("SELECT * FROM {amazons3_file} WHERE uri = :uri", array(':uri' => $uri))->fetchAssoc();
     return $record ? $record : FALSE;
@@ -1094,6 +1094,7 @@
    *   If an exception occurs from the database call, it will percolate out of this function.
    */
   protected function _amazons3_write_cache($metadata) {
+    // Since this is an internal function, don't log it by default.
     //$this->_test_log("_amazons3_write_cache({$metadata['uri']}) called.");
     db_merge('amazons3_file')
       ->key(array('uri' => $metadata['uri']))
@@ -1117,6 +1118,7 @@
    *   If an exception occurs from the database call, it will percolate out of this function.
    */
   protected function _amazons3_delete_cache($uri) {
+    // Since this is an internal function, don't log it by default.
     //$this->_test_log("_amazons3_delete_cache($uri) called.");
     $delete_query = db_delete('amazons3_file');
     if (is_array($uri)) {
diff -u amazons3.drush.inc amazons3.drush.inc
--- amazons3.drush.inc	2013-07-22 15:35:28.000000000 -0700
+++ amazons3.drush.inc	2013-09-11 15:30:05.000000000 -0700
@@ -8,7 +8,7 @@
   
   // The 'amazons3-refresh-cache' command
   $items['amazons3-refresh-cache'] = array(
-    'description' => "Refreshes the AmazonS3 file metadata cache. Your Amazon S3 credentials, and the name of your site's bucket, must be configured first.",
+    'description' => dt("Refreshes the AmazonS3 file metadata cache. Your Amazon S3 credentials, and the name of your site's bucket, must be configured first."),
     'aliases' => array('s3rc'),
     'callback' => 'drush_amazons3_refresh_cache',
   );
diff -u amazons3.install amazons3.install
--- amazons3.install	2013-07-22 15:37:03.000000000 -0700
+++ amazons3.install	2013-09-11 15:31:50.000000000 -0700
@@ -17,11 +17,11 @@
 
   $fopen_allowed = ini_get('allow_url_fopen');
   $ok_message = $t('The PHP allow_url_fopen setting is on.');
-  $error_message = $t('Amazon S3 module requires that the allow_url_fopen setting be turned on in php.ini.');
+  $error_message = $t('The AmazonS3 module requires that the allow_url_fopen setting be turned on in php.ini.');
 
   $requirements['amazons3_allow_url_fopen'] = array(
     'severity' => $fopen_allowed ? REQUIREMENT_OK : REQUIREMENT_ERROR,
-    'title' => $t('AmazonS3'),
+    'title' => $t('AmazonS3'),
     'value' => 'allow_url_fopen',
     'description' => $fopen_allowed ? $ok_message : $error_message,
   );
@@ -50,7 +50,7 @@
     'description' => 'Stores information for uploaded Amazon S3 files.',
     'fields' => array(
       'uri' => array(
-        'description' => 'The URI to access the file (either local or remote).',
+        'description' => 'The S3 URI of the file.',
         'type' => 'varchar',
         'length' => 255,
         'not null' => TRUE,
@@ -84,7 +84,7 @@
         'default' => 0,
       ),
       'uid' => array(
-        'description' => 'The uid of the user who is associated with the file (not Drupal uid).',
+        'description' => 'The S3 uid of the user who is associated with the file.',
         'type' => 'varchar',
         'length' => 255,
         'not null' => TRUE,
@@ -95,6 +95,7 @@
       'timestamp' => array('timestamp'),
     ),
     'primary key' => array('uri'),
+    'collation' => 'utf8_bin',
   );
 
   return $schema;
@@ -185,3 +186,13 @@
     drupal_set_message(t('Unable to determine AmazonS3 bucket name for cache refresh. Please set the bucket name and perform a manual cache refresh from the AmazonS3 configuration page.'), 'warning');
   }
 }
+
+/**
+ * Updates the amazons3_file table to use case sensitive collation.
+ */
+function amazons3_update_7201() {
+  // As stated here: http://forums.mysql.com/read.php?103,19380,200971#msg-200971
+  // MySQL doesn't directly support case sensitive UTF8 collation. Fortunately,
+  // 'utf8_bin' collation is good enough for our purposes.
+  db_query("ALTER TABLE {amazons3_file} CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin");
+}
diff -u amazons3.module amazons3.module
--- amazons3.module	2013-07-23 16:53:12.000000000 -0700
+++ amazons3.module	2013-09-11 16:50:09.000000000 -0700
@@ -28,7 +28,7 @@
 
   $items['admin/config/media/amazons3'] = array(
     'title' => 'Amazon S3',
-    'description' => 'Configure your S3 credentials.',
+    'description' => 'Configure Amazon S3 settings.',
     'page callback' => 'drupal_get_form',
     'page arguments' => array('amazons3_admin'),
     'access arguments' => array('administer amazons3'),
@@ -55,10 +55,12 @@
   switch ($path) {
     case 'admin/config/media/amazons3':
     if (module_exists('awssdk_ui')) {
-      return '<p>' . t('Amazon Web Services authentication can be configured on the <a href="@awssdk_config">AWS SDK configuration page</a>.', array('@awssdk_config' => url('admin/config/media/awssdk'))) . '</p>';
+      return '<p>' . t('Amazon Web Services authentication can be configured on the <a href="@awssdk_config">AWS SDK configuration page</a>.',
+        array('@awssdk_config' => url('admin/config/media/awssdk'))) . '</p>';
     }
     else {
-      return '<p>' . t('To configure your Amazon Web Services credentials, enable the \'AWS SDK for PHP UI\' module, or define those settings in the $conf array in settings.php.') . '</p>';
+      return '<p>' . t('To configure your Amazon Web Services credentials, enable the \'AWS SDK for PHP UI\' module,
+        or define those settings in the $conf array in settings.php.') . '</p>';
     }
   }
 }
@@ -79,14 +81,14 @@
   $form['amazons3_cname'] = array(
     '#type'           => 'checkbox',
     '#title'          => t('Enable CNAME'),
-    '#description'    => t('Serve files from a custom domain by using an appropriately named bucket e.g. "mybucket.mydomain.com".'),
+    '#description'    => t('Serve files from a custom domain by using an appropriately named bucket, e.g. "mybucket.mydomain.com".'),
     '#default_value'  => variable_get('amazons3_cname', 0),
   );
 
   $form['amazons3_domain'] = array(
     '#type'           => 'textfield',
     '#title'          => t('CDN Domain Name'),
-    '#description'    => t('If serving files from CloudFront then the bucket name can differ from the domain name.'),
+    '#description'    => t('If serving files from CloudFront, the bucket name can differ from the domain name.'),
     '#default_value'  => variable_get('amazons3_domain', ''),
     '#states'         => array(
       'visible' => array(
@@ -126,7 +128,7 @@
   );
 
   $form['amazons3_refresh_cache']['refresh'] = array(
-    '#type' => 'submit', 
+    '#type' => 'submit',
     '#suffix' => '<div class="refresh">' . t("This button queries S3 for the metadata of <i><b>all</b></i> the files in your site's bucket, and saves it to the database. This may take a while for buckets with many thousands of files. <br>It should only be necessary to use this button if you've just installed AmazonS3 and you need to cache all the pre-existing files in your bucket, or if you need to restore your metadata cache from scratch for some other reason.") . '</div>',
     '#value' => t('Refresh file metadata cache'),
     '#submit' => array('amazons3_refresh_cache_submit'),
@@ -157,7 +159,7 @@
 
 /**
  * Checks all the configuration options to ensure that they're valid.
- * 
+ *
  * @return
  *   TRUE if config is good to go, otherwise FALSE.
  */
@@ -203,12 +205,12 @@
 }
 
 /**
- * Calls AmazonS3::list_objects() enough times to get all the files in the 
+ * Calls AmazonS3::list_objects() enough times to get all the files in the
  * specified bucket (the API returns at most 1000 per call), and stores their
- * metadata in the cache table. 
- * 
+ * metadata in the cache table.
+ *
  * Once the file metadata has been created, the the folder metadata will
- * also be refreshed. 
+ * also be refreshed.
  */
 function _amazons3_refresh_cache($bucket) {
   // Don't try to do anything if our configuration settings are invalid.
@@ -269,12 +271,13 @@
     }
     catch (PDOException $e) {
       if ($e->getCode() == 23000) {
-        // This happens if there are two files in S3 with the same name, but
-        // different capitalization, and the database doesn't support keys which
-        // are case-insensitively identical (e.g. MySQL on OSX).
-        // When this happens, the best we can do is redo each insert one at a time,
-        // catching and logging the individual failures.
-        foreach ($metadata_list as $metadata) {
+        // This shouldn't ever happen!!!
+        // I originally coded this error correction for the case when there are two files in S3 with the same name, but
+        // different capitalization. By default, MySQL doesn't allow strings which are case-insensitively identical, but
+        // I found out how to get around that (see amazons3_update_7201()).
+        // Just in case this does ever happen, though, the best we can do is redo each insert one at a time, catching
+        // and logging the individual failures.
+        foreach ($file_metadata_list as $metadata) {
           try {
             db_insert('amazons3_file')
               ->fields($metadata_fields)
@@ -282,13 +285,14 @@
               ->execute();
           }
           catch (PDOException $e) {
-            drupal_set_message(t('The file @uri has the same name as another file in S3, but with different capitalization. ' .
-              'Your database does not support that, so the file cannot be cached, and will thus be treated as non-existent. It is strongly advised that you rename this file.', array('@uri' => $metadata['uri'])), 'warning');
+            drupal_set_message(t("The file @uri has the same name as another file in S3, but with different capitalization.
+              If you haven't done so already, be sure to run the database update script (drush updb).
+              If you've already done that, something is very wrong, and you should post a ticket to the AmazonS3 issue queue.", array('@uri' => $metadata['uri'])), 'warning');
           }
         }
       }
       else {
-        // Other exception are unexpected, and should be percolated as normal.
+        // Other exceptions are unexpected, and should be percolated as normal.
         throw $e;
       }
     }
@@ -365,12 +369,12 @@
 }
 
 /**
- * Converts objects returned by AmazonS3::get_objects() into s3 metadata arrays 
+ * Converts objects returned by AmazonS3::get_objects() into s3 metadata arrays
  * compatible with those returned by AmazonS3::get_object_metadata();
  */
 function _amazons3_s3_object_to_s3_metadata($object) {
-  // This is a sloppy but effective way to do a deep conversion of an object 
-  // into a multi-dimentional array, found here: 
+  // This is a sloppy but effective way to do a deep conversion of an object
+  // into a multi-dimensional array, found here:
   // http://stackoverflow.com/a/2476954/464318
   $s3_metadata = json_decode(json_encode($object), true);
   return $s3_metadata;
