Indentation fixes, untabify and related whitespace-cleanup. No code changed.
author bagder <bagder>
Fri, 27 Feb 2009 08:53:10 +0000 (08:53 +0000)
committer bagder <bagder>
Fri, 27 Feb 2009 08:53:10 +0000 (08:53 +0000)
lib/amigaos.c
lib/cookie.h
lib/http_ntlm.c
lib/http_ntlm.h
lib/krb5.c
lib/nss.c
lib/telnet.c
lib/transfer.c
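
The change is purely mechanical: tabs become spaces and trailing whitespace is dropped, with no functional difference. For illustration only, below is a minimal C sketch of such an untabify filter (a hypothetical helper, not part of this commit or of the curl tree), assuming 8-column tab stops:

/*
 * untabify.c - hypothetical helper, shown only to illustrate the kind of
 * mechanical cleanup this commit performs: expand tabs to spaces (8-column
 * stops assumed) and drop trailing whitespace.
 */
#include <stdio.h>

#define TABSTOP 8

int main(void)
{
  int c;
  long col = 0;     /* visible characters already written on this line */
  long pending = 0; /* queued spaces; dropped if the line ends here */

  while((c = getchar()) != EOF) {
    if(c == '\t') {
      /* queue spaces up to the next tab stop instead of writing a tab */
      long next = ((col + pending) / TABSTOP + 1) * TABSTOP;
      pending = next - col;
    }
    else if(c == ' ')
      pending++;
    else if(c == '\n') {
      putchar('\n'); /* queued spaces were trailing whitespace: drop them */
      col = 0;
      pending = 0;
    }
    else {
      while(pending) { /* flush queued spaces before a visible character */
        putchar(' ');
        col++;
        pending--;
      }
      putchar(c);
      col++;
    }
  }
  return 0;
}

Used as a filter, e.g. ./untabify < lib/krb5.c > krb5.c.clean. In practice an editor command such as Emacs' untabify plus whitespace-cleanup would typically do this job, as the commit message suggests.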

index 64696e8..c7438a4 100644
--- a/lib/amigaos.c
+++ b/lib/amigaos.c
@@ -33,42 +33,39 @@ extern int errno, h_errno;
 #include <stabs.h>
 void __request(const char *msg);
 #else
-# define __request( msg )      Printf( msg "\n\a")
+# define __request( msg )       Printf( msg "\n\a")
 #endif
 
 void amiga_cleanup()
 {
-       if(SocketBase) {
-               CloseLibrary(SocketBase);
-               SocketBase = NULL;
-       }
+  if(SocketBase) {
+    CloseLibrary(SocketBase);
+    SocketBase = NULL;
+  }
 }
 
 BOOL amiga_init()
 {
-       if(!SocketBase)
-               SocketBase = OpenLibrary("bsdsocket.library", 4);
-       
-       if(!SocketBase) {
-               __request("No TCP/IP Stack running!");
-               return FALSE;
-       }
-       
-       if(SocketBaseTags(
-               SBTM_SETVAL(SBTC_ERRNOPTR(sizeof(errno))), (ULONG) &errno,
-//             SBTM_SETVAL(SBTC_HERRNOLONGPTR),           (ULONG) &h_errno,
-               SBTM_SETVAL(SBTC_LOGTAGPTR),               (ULONG) "cURL",
-       TAG_DONE)) {
-               
-               __request("SocketBaseTags ERROR");
-               return FALSE;
-       }
-       
+  if(!SocketBase)
+    SocketBase = OpenLibrary("bsdsocket.library", 4);
+
+  if(!SocketBase) {
+    __request("No TCP/IP Stack running!");
+    return FALSE;
+  }
+
+  if(SocketBaseTags(SBTM_SETVAL(SBTC_ERRNOPTR(sizeof(errno))), (ULONG) &errno,
+                    SBTM_SETVAL(SBTC_LOGTAGPTR), (ULONG) "cURL",
+                    TAG_DONE)) {
+    __request("SocketBaseTags ERROR");
+    return FALSE;
+  }
+
 #ifndef __libnix__
-       atexit(amiga_cleanup);
+  atexit(amiga_cleanup);
 #endif
-       
-       return TRUE;
+
+  return TRUE;
 }
 
 #ifdef __libnix__
index a1d1073..a9afc41 100644
--- a/lib/cookie.h
+++ b/lib/cookie.h
@@ -91,7 +91,7 @@ struct Cookie *Curl_cookie_add(struct SessionHandle *data,
 struct CookieInfo *Curl_cookie_init(struct SessionHandle *data,
                                     const char *, struct CookieInfo *, bool);
 struct Cookie *Curl_cookie_getlist(struct CookieInfo *, const char *,
-                                  const char *, bool);
+                                   const char *, bool);
 void Curl_cookie_freelist(struct Cookie *cookies, bool cookiestoo);
 void Curl_cookie_clearall(struct CookieInfo *cookies);
 void Curl_cookie_clearsess(struct CookieInfo *cookies);
index 59a2d69..9742e19 100644
--- a/lib/http_ntlm.c
+++ b/lib/http_ntlm.c
@@ -347,7 +347,7 @@ static void setup_des_key(const unsigned char *key_56,
  * Turns a 56 bit key into the 64 bit, odd parity key and sets the key.
  */
 static void setup_des_key(const unsigned char *key_56,
-                         gcry_cipher_hd_t *des)
+                          gcry_cipher_hd_t *des)
 {
   char key[8];
 
index 17f98e8..69b76a5 100644
--- a/lib/http_ntlm.h
+++ b/lib/http_ntlm.h
@@ -34,7 +34,7 @@ typedef enum {
 
 /* this is for ntlm header input */
 CURLntlm Curl_input_ntlm(struct connectdata *conn, bool proxy,
-                        const char *header);
+                         const char *header);
 
 /* this is for creating ntlm header output */
 CURLcode Curl_output_ntlm(struct connectdata *conn, bool proxy);
index 5b921d9..ab25718 100644
--- a/lib/krb5.c
+++ b/lib/krb5.c
@@ -132,15 +132,15 @@ krb5_encode(void *app_data, const void *from, int length, int level, void **to,
   /* shut gcc up */
   conn = NULL;
 
-  /* NOTE that the cast is safe, neither of the krb5, gnu gss and heimdal 
+  /* NOTE that the cast is safe, neither of the krb5, gnu gss and heimdal
    * libraries modify the input buffer in gss_seal()
    */
   dec.value = (void*)from;
   dec.length = length;
   maj = gss_seal(&min, *context,
-                level == prot_private,
-                GSS_C_QOP_DEFAULT,
-                &dec, &state, &enc);
+                 level == prot_private,
+                 GSS_C_QOP_DEFAULT,
+                 &dec, &state, &enc);
 
   if(maj != GSS_S_COMPLETE)
     return -1;
@@ -178,10 +178,12 @@ krb5_auth(void *app_data, struct connectdata *conn)
 
   chan.initiator_addrtype = GSS_C_AF_INET;
   chan.initiator_address.length = l - 4;
-  chan.initiator_address.value = &((struct sockaddr_in *)LOCAL_ADDR)->sin_addr.s_addr;
+  chan.initiator_address.value =
+    &((struct sockaddr_in *)LOCAL_ADDR)->sin_addr.s_addr;
   chan.acceptor_addrtype = GSS_C_AF_INET;
   chan.acceptor_address.length = l - 4;
-  chan.acceptor_address.value = &((struct sockaddr_in *)REMOTE_ADDR)->sin_addr.s_addr;
+  chan.acceptor_address.value =
+    &((struct sockaddr_in *)REMOTE_ADDR)->sin_addr.s_addr;
   chan.application_data.length = 0;
   chan.application_data.value = NULL;
 
@@ -192,12 +194,12 @@ krb5_auth(void *app_data, struct connectdata *conn)
       result = Curl_ftpsendf(conn, "AUTH GSSAPI");
 
       if(result)
-       return -2;
+        return -2;
       if(Curl_GetFTPResponse(&nread, conn, NULL))
-       return -1;
+        return -1;
 
       if(data->state.buffer[0] != '3')
-       return -1;
+        return -1;
     }
 
     gssbuf.value = data->state.buffer;
@@ -206,8 +208,8 @@ krb5_auth(void *app_data, struct connectdata *conn)
     if(maj != GSS_S_COMPLETE) {
       gss_release_name(&min, &gssname);
       if(service == srv_host) {
-       Curl_failf(data, "Error importing service name %s", gssbuf.value);
-       return AUTH_ERROR;
+        Curl_failf(data, "Error importing service name %s", gssbuf.value);
+        return AUTH_ERROR;
       }
       service = srv_host;
       continue;
@@ -224,69 +226,71 @@ krb5_auth(void *app_data, struct connectdata *conn)
     do {
       ret = AUTH_OK;
       maj = gss_init_sec_context(&min,
-                                GSS_C_NO_CREDENTIAL,
-                                context,
-                                gssname,
-                                GSS_C_NO_OID,
-                                GSS_C_MUTUAL_FLAG | GSS_C_REPLAY_FLAG,
-                                0,
-                                &chan,
-                                gssresp,
-                                NULL,
-                                &gssbuf,
-                                NULL,
-                                NULL);
+                                 GSS_C_NO_CREDENTIAL,
+                                 context,
+                                 gssname,
+                                 GSS_C_NO_OID,
+                                 GSS_C_MUTUAL_FLAG | GSS_C_REPLAY_FLAG,
+                                 0,
+                                 &chan,
+                                 gssresp,
+                                 NULL,
+                                 &gssbuf,
+                                 NULL,
+                                 NULL);
 
       if(gssresp) {
-       free(_gssresp.value);
-       gssresp = NULL;
+        free(_gssresp.value);
+        gssresp = NULL;
       }
 
       if(maj != GSS_S_COMPLETE && maj != GSS_S_CONTINUE_NEEDED) {
-       Curl_infof(data, "Error creating security context");
-       ret = AUTH_ERROR;
-       break;
+        Curl_infof(data, "Error creating security context");
+        ret = AUTH_ERROR;
+        break;
       }
 
       if(gssbuf.length != 0) {
-       if(Curl_base64_encode(data, (char *)gssbuf.value, gssbuf.length, &p) < 1) {
-         Curl_infof(data, "Out of memory base64-encoding");
-         ret = AUTH_CONTINUE;
-         break;
-       }
-
-       result = Curl_ftpsendf(conn, "ADAT %s", p);
-
-       free(p);
-
-       if(result) {
-         ret = -2;
-         break;
-       }
-
-       if(Curl_GetFTPResponse(&nread, conn, NULL)) {
-         ret = -1;
-         break;
-       }
-
-       if(data->state.buffer[0] != '2' && data->state.buffer[0] != '3'){
-         Curl_infof(data, "Server didn't accept auth data\n");
-         ret = AUTH_ERROR;
-         break;
-       }
-
-       p = data->state.buffer + 4;
-       p = strstr(p, "ADAT=");
-       if(p) {
-         _gssresp.length = Curl_base64_decode(p + 5, (unsigned char **)&_gssresp.value);
-         if(_gssresp.length < 1) {
-           Curl_failf(data, "Out of memory base64-encoding");
-           ret = AUTH_CONTINUE;
-           break;
-         }
-       }
-
-       gssresp = &_gssresp;
+        if(Curl_base64_encode(data, (char *)gssbuf.value, gssbuf.length, &p)
+           < 1) {
+          Curl_infof(data, "Out of memory base64-encoding");
+          ret = AUTH_CONTINUE;
+          break;
+        }
+
+        result = Curl_ftpsendf(conn, "ADAT %s", p);
+
+        free(p);
+
+        if(result) {
+          ret = -2;
+          break;
+        }
+
+        if(Curl_GetFTPResponse(&nread, conn, NULL)) {
+          ret = -1;
+          break;
+        }
+
+        if(data->state.buffer[0] != '2' && data->state.buffer[0] != '3'){
+          Curl_infof(data, "Server didn't accept auth data\n");
+          ret = AUTH_ERROR;
+          break;
+        }
+
+        p = data->state.buffer + 4;
+        p = strstr(p, "ADAT=");
+        if(p) {
+          _gssresp.length = Curl_base64_decode(p + 5, (unsigned char **)
+                                               &_gssresp.value);
+          if(_gssresp.length < 1) {
+            Curl_failf(data, "Out of memory base64-encoding");
+            ret = AUTH_CONTINUE;
+            break;
+          }
+        }
+
+        gssresp = &_gssresp;
       }
     } while(maj == GSS_S_CONTINUE_NEEDED);
 
index 83ad6eb..ce9e0da 100644
--- a/lib/nss.c
+++ b/lib/nss.c
@@ -455,7 +455,7 @@ static int nss_load_crl(const char* crlfilename, PRBool ascii)
                         NULL,SEC_CRL_TYPE,
                         NULL,CRL_IMPORT_DEFAULT_OPTIONS,
                         NULL,(CRL_DECODE_DEFAULT_OPTIONS|
-                             CRL_DECODE_DONT_COPY_DER));
+                              CRL_DECODE_DONT_COPY_DER));
   if (slot) PK11_FreeSlot(slot);
   if (!crl) return 0;
   SEC_DestroyCrl(crl);
index ba405d7..750fa23 100644
--- a/lib/telnet.c
+++ b/lib/telnet.c
@@ -1124,7 +1124,7 @@ void telrcv(struct connectdata *conn,
 /* Escape and send a telnet data block */
 /* TODO: write large chunks of data instead of one byte at a time */
 static CURLcode send_telnet_data(struct connectdata *conn,
-                                char *buffer, ssize_t nread)
+                                 char *buffer, ssize_t nread)
 {
   unsigned char outbuf[2];
   ssize_t bytes_written, total_written;
@@ -1144,16 +1144,16 @@ static CURLcode send_telnet_data(struct connectdata *conn,
       pfd[0].fd = conn->sock[FIRSTSOCKET];
       pfd[0].events = POLLOUT;
       switch (Curl_poll(pfd, 1, -1)) {
-       case -1:                    /* error, abort writing */
-       case 0:                     /* timeout (will never happen) */
-         rc = CURLE_SEND_ERROR;
-         break;
-       default:                    /* write! */
-         bytes_written = 0;
-         rc = Curl_write(conn, conn->sock[FIRSTSOCKET], outbuf+total_written,
-                         out_count-total_written, &bytes_written);
-         total_written += bytes_written;
-         break;
+        case -1:                    /* error, abort writing */
+        case 0:                     /* timeout (will never happen) */
+          rc = CURLE_SEND_ERROR;
+          break;
+        default:                    /* write! */
+          bytes_written = 0;
+          rc = Curl_write(conn, conn->sock[FIRSTSOCKET], outbuf+total_written,
+                          out_count-total_written, &bytes_written);
+          total_written += bytes_written;
+          break;
       }
     /* handle partial write */
     } while (rc == CURLE_OK && total_written < out_count);
@@ -1317,7 +1317,7 @@ static CURLcode telnet_do(struct connectdata *conn, bool *done)
       while(1) {
         if(!PeekNamedPipe(stdin_handle, NULL, 0, NULL, &readfile_read, NULL)) {
           keepon = FALSE;
-         code = CURLE_READ_ERROR;
+          code = CURLE_READ_ERROR;
           break;
         }
 
@@ -1327,15 +1327,15 @@ static CURLcode telnet_do(struct connectdata *conn, bool *done)
         if(!ReadFile(stdin_handle, buf, sizeof(data->state.buffer),
                      &readfile_read, NULL)) {
           keepon = FALSE;
-         code = CURLE_READ_ERROR;
+          code = CURLE_READ_ERROR;
           break;
         }
 
         code = send_telnet_data(conn, buf, readfile_read);
-       if(code) {
+        if(code) {
           keepon = FALSE;
-         break;
-       }
+          break;
+        }
       }
     }
     break;
@@ -1345,14 +1345,14 @@ static CURLcode telnet_do(struct connectdata *conn, bool *done)
       if(!ReadFile(stdin_handle, buf, sizeof(data->state.buffer),
                    &readfile_read, NULL)) {
         keepon = FALSE;
-       code = CURLE_READ_ERROR;
+        code = CURLE_READ_ERROR;
         break;
       }
 
       code = send_telnet_data(conn, buf, readfile_read);
       if(code) {
-       keepon = FALSE;
-       break;
+        keepon = FALSE;
+        break;
       }
     }
     break;
@@ -1417,10 +1417,10 @@ static CURLcode telnet_do(struct connectdata *conn, bool *done)
       if(pfd[1].revents & POLLIN) { /* read from stdin */
         nread = read(0, buf, 255);
         code = send_telnet_data(conn, buf, nread);
-       if(code) {
+        if(code) {
           keepon = FALSE;
-         break;
-       }
+          break;
+        }
       }
 
       if(pfd[0].revents & POLLIN) {
index fe25ecc..6b9e310 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -377,11 +377,11 @@ static CURLcode readwrite_data(struct SessionHandle *data,
 
     if(k->size != -1 && !k->header) {
       /* make sure we don't read "too much" if we can help it since we
-        might be pipelining and then someone else might want to read what
-        follows! */
+         might be pipelining and then someone else might want to read what
+         follows! */
       curl_off_t totalleft = k->size - k->bytecount;
       if(totalleft < (curl_off_t)bytestoread)
-       bytestoread = (size_t)totalleft;
+        bytestoread = (size_t)totalleft;
     }
 
     if(bytestoread) {
@@ -390,25 +390,25 @@ static CURLcode readwrite_data(struct SessionHandle *data,
 
       /* subzero, this would've blocked */
       if(0 > readrc)
-       break; /* get out of loop */
+        break; /* get out of loop */
 
       /* get the CURLcode from the int */
       result = (CURLcode)readrc;
 
       if(result>0)
-       return result;
+        return result;
     }
     else {
       /* read nothing but since we wanted nothing we consider this an OK
-        situation to proceed from */
+         situation to proceed from */
       nread = 0;
     }
 
     if((k->bytecount == 0) && (k->writebytecount == 0)) {
       Curl_pgrsTime(data, TIMER_STARTTRANSFER);
       if(k->exp100 > EXP100_SEND_DATA)
-       /* set time stamp to compare with when waiting for the 100 */
-       k->start100 = Curl_tvnow();
+        /* set time stamp to compare with when waiting for the 100 */
+        k->start100 = Curl_tvnow();
     }
 
     *didwhat |= KEEP_READ;
@@ -421,7 +421,7 @@ static CURLcode readwrite_data(struct SessionHandle *data,
     }
     else if(0 >= nread) {
       /* if we receive 0 or less here, the server closed the connection
-        and we bail out from this! */
+         and we bail out from this! */
       DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
       k->keepon &= ~KEEP_READ;
       break;
@@ -439,10 +439,10 @@ static CURLcode readwrite_data(struct SessionHandle *data,
       bool stop_reading = FALSE;
       result = readwrite_http_headers(data, conn, k, &nread, &stop_reading);
       if(result)
-       return result;
+        return result;
       if(stop_reading)
-       /* We've stopped dealing with input, get out of the do-while loop */
-       break;
+        /* We've stopped dealing with input, get out of the do-while loop */
+        break;
     }
 #endif /* CURL_DISABLE_HTTP */
 
@@ -454,148 +454,148 @@ static CURLcode readwrite_data(struct SessionHandle *data,
 
 #ifndef CURL_DISABLE_HTTP
       if(0 == k->bodywrites && !is_empty_data) {
-       /* These checks are only made the first time we are about to
-          write a piece of the body */
-       if(conn->protocol&PROT_HTTP) {
-         /* HTTP-only checks */
-
-         if(data->req.newurl) {
-           if(conn->bits.close) {
-             /* Abort after the headers if "follow Location" is set
-                and we're set to close anyway. */
-             k->keepon &= ~KEEP_READ;
-             *done = TRUE;
-             return CURLE_OK;
-           }
-           /* We have a new url to load, but since we want to be able
-              to re-use this connection properly, we read the full
-              response in "ignore more" */
-           k->ignorebody = TRUE;
-           infof(data, "Ignoring the response-body\n");
-         }
-         if(data->state.resume_from && !k->content_range &&
-            (data->set.httpreq==HTTPREQ_GET) &&
-            !k->ignorebody) {
-           /* we wanted to resume a download, although the server doesn't
-            * seem to support this and we did this with a GET (if it
-            * wasn't a GET we did a POST or PUT resume) */
-           failf(data, "HTTP server doesn't seem to support "
-                 "byte ranges. Cannot resume.");
-           return CURLE_RANGE_ERROR;
-         }
-
-         if(data->set.timecondition && !data->state.range) {
-           /* A time condition has been set AND no ranges have been
-              requested. This seems to be what chapter 13.3.4 of
-              RFC 2616 defines to be the correct action for a
-              HTTP/1.1 client */
-           if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
-             switch(data->set.timecondition) {
-             case CURL_TIMECOND_IFMODSINCE:
-             default:
-               if(k->timeofdoc < data->set.timevalue) {
-                 infof(data,
-                       "The requested document is not new enough\n");
-                 *done = TRUE;
+        /* These checks are only made the first time we are about to
+           write a piece of the body */
+        if(conn->protocol&PROT_HTTP) {
+          /* HTTP-only checks */
+
+          if(data->req.newurl) {
+            if(conn->bits.close) {
+              /* Abort after the headers if "follow Location" is set
+                 and we're set to close anyway. */
+              k->keepon &= ~KEEP_READ;
+              *done = TRUE;
+              return CURLE_OK;
+            }
+            /* We have a new url to load, but since we want to be able
+               to re-use this connection properly, we read the full
+               response in "ignore more" */
+            k->ignorebody = TRUE;
+            infof(data, "Ignoring the response-body\n");
+          }
+          if(data->state.resume_from && !k->content_range &&
+             (data->set.httpreq==HTTPREQ_GET) &&
+             !k->ignorebody) {
+            /* we wanted to resume a download, although the server doesn't
+             * seem to support this and we did this with a GET (if it
+             * wasn't a GET we did a POST or PUT resume) */
+            failf(data, "HTTP server doesn't seem to support "
+                  "byte ranges. Cannot resume.");
+            return CURLE_RANGE_ERROR;
+          }
+
+          if(data->set.timecondition && !data->state.range) {
+            /* A time condition has been set AND no ranges have been
+               requested. This seems to be what chapter 13.3.4 of
+               RFC 2616 defines to be the correct action for a
+               HTTP/1.1 client */
+            if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
+              switch(data->set.timecondition) {
+              case CURL_TIMECOND_IFMODSINCE:
+              default:
+                if(k->timeofdoc < data->set.timevalue) {
+                  infof(data,
+                        "The requested document is not new enough\n");
+                  *done = TRUE;
                   data->info.timecond = TRUE;
-                 return CURLE_OK;
-               }
-               break;
-             case CURL_TIMECOND_IFUNMODSINCE:
-               if(k->timeofdoc > data->set.timevalue) {
-                 infof(data,
-                       "The requested document is not old enough\n");
-                 *done = TRUE;
+                  return CURLE_OK;
+                }
+                break;
+              case CURL_TIMECOND_IFUNMODSINCE:
+                if(k->timeofdoc > data->set.timevalue) {
+                  infof(data,
+                        "The requested document is not old enough\n");
+                  *done = TRUE;
                   data->info.timecond = TRUE;
-                 return CURLE_OK;
-               }
-               break;
-             } /* switch */
-           } /* two valid time strings */
-         } /* we have a time condition */
-
-       } /* this is HTTP */
+                  return CURLE_OK;
+                }
+                break;
+              } /* switch */
+            } /* two valid time strings */
+          } /* we have a time condition */
+
+        } /* this is HTTP */
       } /* this is the first time we write a body part */
 #endif /* CURL_DISABLE_HTTP */
       k->bodywrites++;
 
       /* pass data to the debug function before it gets "dechunked" */
       if(data->set.verbose) {
-       if(k->badheader) {
-         Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
-                    (size_t)k->hbuflen, conn);
-         if(k->badheader == HEADER_PARTHEADER)
-           Curl_debug(data, CURLINFO_DATA_IN,
-                      k->str, (size_t)nread, conn);
-       }
-       else
-         Curl_debug(data, CURLINFO_DATA_IN,
-                    k->str, (size_t)nread, conn);
+        if(k->badheader) {
+          Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
+                     (size_t)k->hbuflen, conn);
+          if(k->badheader == HEADER_PARTHEADER)
+            Curl_debug(data, CURLINFO_DATA_IN,
+                       k->str, (size_t)nread, conn);
+        }
+        else
+          Curl_debug(data, CURLINFO_DATA_IN,
+                     k->str, (size_t)nread, conn);
       }
 
 #ifndef CURL_DISABLE_HTTP
       if(k->chunk) {
-       /*
-        * Here comes a chunked transfer flying and we need to decode this
-        * properly.  While the name says read, this function both reads
-        * and writes away the data. The returned 'nread' holds the number
-        * of actual data it wrote to the client.
-        */
-
-       CHUNKcode res =
-         Curl_httpchunk_read(conn, k->str, nread, &nread);
-
-       if(CHUNKE_OK < res) {
-         if(CHUNKE_WRITE_ERROR == res) {
-           failf(data, "Failed writing data");
-           return CURLE_WRITE_ERROR;
-         }
-         failf(data, "Received problem %d in the chunky parser", res);
-         return CURLE_RECV_ERROR;
-       }
-       else if(CHUNKE_STOP == res) {
-         size_t dataleft;
-         /* we're done reading chunks! */
-         k->keepon &= ~KEEP_READ; /* read no more */
-
-         /* There are now possibly N number of bytes at the end of the
-            str buffer that weren't written to the client.
-
-            We DO care about this data if we are pipelining.
-            Push it back to be read on the next pass. */
-
-         dataleft = conn->chunk.dataleft;
-         if(dataleft != 0) {
-           infof(conn->data, "Leftovers after chunking. "
-                 " Rewinding %d bytes\n",dataleft);
-           read_rewind(conn, dataleft);
-         }
-       }
-       /* If it returned OK, we just keep going */
+        /*
+         * Here comes a chunked transfer flying and we need to decode this
+         * properly.  While the name says read, this function both reads
+         * and writes away the data. The returned 'nread' holds the number
+         * of actual data it wrote to the client.
+         */
+
+        CHUNKcode res =
+          Curl_httpchunk_read(conn, k->str, nread, &nread);
+
+        if(CHUNKE_OK < res) {
+          if(CHUNKE_WRITE_ERROR == res) {
+            failf(data, "Failed writing data");
+            return CURLE_WRITE_ERROR;
+          }
+          failf(data, "Received problem %d in the chunky parser", res);
+          return CURLE_RECV_ERROR;
+        }
+        else if(CHUNKE_STOP == res) {
+          size_t dataleft;
+          /* we're done reading chunks! */
+          k->keepon &= ~KEEP_READ; /* read no more */
+
+          /* There are now possibly N number of bytes at the end of the
+             str buffer that weren't written to the client.
+
+             We DO care about this data if we are pipelining.
+             Push it back to be read on the next pass. */
+
+          dataleft = conn->chunk.dataleft;
+          if(dataleft != 0) {
+            infof(conn->data, "Leftovers after chunking. "
+                  " Rewinding %d bytes\n",dataleft);
+            read_rewind(conn, dataleft);
+          }
+        }
+        /* If it returned OK, we just keep going */
       }
 #endif   /* CURL_DISABLE_HTTP */
 
       if((-1 != k->maxdownload) &&
-        (k->bytecount + nread >= k->maxdownload)) {
-       /* The 'excess' amount below can't be more than BUFSIZE which
-          always will fit in a size_t */
-       size_t excess = (size_t)(k->bytecount + nread - k->maxdownload);
-       if(excess > 0 && !k->ignorebody) {
-         infof(data,
-               "Rewinding stream by : %d"
-               " bytes on url %s (size = %" FORMAT_OFF_T
-               ", maxdownload = %" FORMAT_OFF_T
-               ", bytecount = %" FORMAT_OFF_T ", nread = %d)\n",
-               excess, data->state.path,
-               k->size, k->maxdownload, k->bytecount, nread);
-         read_rewind(conn, excess);
-       }
-
-       nread = (ssize_t) (k->maxdownload - k->bytecount);
-       if(nread < 0 ) /* this should be unusual */
-         nread = 0;
-
-       k->keepon &= ~KEEP_READ; /* we're done reading */
+         (k->bytecount + nread >= k->maxdownload)) {
+        /* The 'excess' amount below can't be more than BUFSIZE which
+           always will fit in a size_t */
+        size_t excess = (size_t)(k->bytecount + nread - k->maxdownload);
+        if(excess > 0 && !k->ignorebody) {
+          infof(data,
+                "Rewinding stream by : %d"
+                " bytes on url %s (size = %" FORMAT_OFF_T
+                ", maxdownload = %" FORMAT_OFF_T
+                ", bytecount = %" FORMAT_OFF_T ", nread = %d)\n",
+                excess, data->state.path,
+                k->size, k->maxdownload, k->bytecount, nread);
+          read_rewind(conn, excess);
+        }
+
+        nread = (ssize_t) (k->maxdownload - k->bytecount);
+        if(nread < 0 ) /* this should be unusual */
+          nread = 0;
+
+        k->keepon &= ~KEEP_READ; /* we're done reading */
       }
 
       k->bytecount += nread;
@@ -603,70 +603,70 @@ static CURLcode readwrite_data(struct SessionHandle *data,
       Curl_pgrsSetDownloadCounter(data, k->bytecount);
 
       if(!k->chunk && (nread || k->badheader || is_empty_data)) {
-       /* If this is chunky transfer, it was already written */
-
-       if(k->badheader && !k->ignorebody) {
-         /* we parsed a piece of data wrongly assuming it was a header
-            and now we output it as body instead */
-         result = Curl_client_write(conn, CLIENTWRITE_BODY,
-                                    data->state.headerbuff,
-                                    k->hbuflen);
-         if(result)
-           return result;
-       }
-       if(k->badheader < HEADER_ALLBAD) {
-         /* This switch handles various content encodings. If there's an
-            error here, be sure to check over the almost identical code
-            in http_chunks.c.
-            Make sure that ALL_CONTENT_ENCODINGS contains all the
-            encodings handled here. */
+        /* If this is chunky transfer, it was already written */
+
+        if(k->badheader && !k->ignorebody) {
+          /* we parsed a piece of data wrongly assuming it was a header
+             and now we output it as body instead */
+          result = Curl_client_write(conn, CLIENTWRITE_BODY,
+                                     data->state.headerbuff,
+                                     k->hbuflen);
+          if(result)
+            return result;
+        }
+        if(k->badheader < HEADER_ALLBAD) {
+          /* This switch handles various content encodings. If there's an
+             error here, be sure to check over the almost identical code
+             in http_chunks.c.
+             Make sure that ALL_CONTENT_ENCODINGS contains all the
+             encodings handled here. */
 #ifdef HAVE_LIBZ
-         switch (conn->data->set.http_ce_skip ?
-                 IDENTITY : k->content_encoding) {
-         case IDENTITY:
+          switch (conn->data->set.http_ce_skip ?
+                  IDENTITY : k->content_encoding) {
+          case IDENTITY:
 #endif
-           /* This is the default when the server sends no
-              Content-Encoding header. See Curl_readwrite_init; the
-              memset() call initializes k->content_encoding to zero. */
-           if(!k->ignorebody)
-             result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
-                                        nread);
+            /* This is the default when the server sends no
+               Content-Encoding header. See Curl_readwrite_init; the
+               memset() call initializes k->content_encoding to zero. */
+            if(!k->ignorebody)
+              result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
+                                         nread);
 #ifdef HAVE_LIBZ
-           break;
-
-         case DEFLATE:
-           /* Assume CLIENTWRITE_BODY; headers are not encoded. */
-           if(!k->ignorebody)
-             result = Curl_unencode_deflate_write(conn, k, nread);
-           break;
-
-         case GZIP:
-           /* Assume CLIENTWRITE_BODY; headers are not encoded. */
-           if(!k->ignorebody)
-             result = Curl_unencode_gzip_write(conn, k, nread);
-           break;
-
-         case COMPRESS:
-         default:
-           failf (data, "Unrecognized content encoding type. "
-                  "libcurl understands `identity', `deflate' and `gzip' "
-                  "content encodings.");
-           result = CURLE_BAD_CONTENT_ENCODING;
-           break;
-         }
+            break;
+
+          case DEFLATE:
+            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
+            if(!k->ignorebody)
+              result = Curl_unencode_deflate_write(conn, k, nread);
+            break;
+
+          case GZIP:
+            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
+            if(!k->ignorebody)
+              result = Curl_unencode_gzip_write(conn, k, nread);
+            break;
+
+          case COMPRESS:
+          default:
+            failf (data, "Unrecognized content encoding type. "
+                   "libcurl understands `identity', `deflate' and `gzip' "
+                   "content encodings.");
+            result = CURLE_BAD_CONTENT_ENCODING;
+            break;
+          }
 #endif
-       }
-       k->badheader = HEADER_NORMAL; /* taken care of now */
+        }
+        k->badheader = HEADER_NORMAL; /* taken care of now */
 
-       if(result)
-         return result;
+        if(result)
+          return result;
       }
 
     } /* if(! header and data to read ) */
 
     if(is_empty_data) {
       /* if we received nothing, the server closed the connection and we
-        are done */
+         are done */
       k->keepon &= ~KEEP_READ;
     }
 
@@ -711,34 +711,34 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
 
     if(!k->end_ptr) {
       /* Not a complete header line within buffer, append the data to
-        the end of the headerbuff. */
+         the end of the headerbuff. */
 
       if(k->hbuflen + *nread >= data->state.headersize) {
-       /* We enlarge the header buffer as it is too small */
-       char *newbuff;
-       size_t newsize=CURLMAX((k->hbuflen+*nread)*3/2,
-                              data->state.headersize*2);
-       hbufp_index = k->hbufp - data->state.headerbuff;
-       newbuff = realloc(data->state.headerbuff, newsize);
-       if(!newbuff) {
-         failf (data, "Failed to alloc memory for big header!");
-         return CURLE_OUT_OF_MEMORY;
-       }
-       data->state.headersize=newsize;
-       data->state.headerbuff = newbuff;
-       k->hbufp = data->state.headerbuff + hbufp_index;
+        /* We enlarge the header buffer as it is too small */
+        char *newbuff;
+        size_t newsize=CURLMAX((k->hbuflen+*nread)*3/2,
+                               data->state.headersize*2);
+        hbufp_index = k->hbufp - data->state.headerbuff;
+        newbuff = realloc(data->state.headerbuff, newsize);
+        if(!newbuff) {
+          failf (data, "Failed to alloc memory for big header!");
+          return CURLE_OUT_OF_MEMORY;
+        }
+        data->state.headersize=newsize;
+        data->state.headerbuff = newbuff;
+        k->hbufp = data->state.headerbuff + hbufp_index;
       }
       memcpy(k->hbufp, k->str, *nread);
       k->hbufp += *nread;
       k->hbuflen += *nread;
       if(!k->headerline && (k->hbuflen>5)) {
-       /* make a first check that this looks like a HTTP header */
-       if(!checkhttpprefix(data, data->state.headerbuff)) {
-         /* this is not the beginning of a HTTP first header line */
-         k->header = FALSE;
-         k->badheader = HEADER_ALLBAD;
-         break;
-       }
+        /* make a first check that this looks like a HTTP header */
+        if(!checkhttpprefix(data, data->state.headerbuff)) {
+          /* this is not the beginning of a HTTP first header line */
+          k->header = FALSE;
+          k->badheader = HEADER_ALLBAD;
+          break;
+        }
       }
 
       break; /* read more and try again */
@@ -762,12 +762,12 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
        data->state.headersize) {
       char *newbuff;
       size_t newsize=CURLMAX((k->hbuflen+full_length)*3/2,
-                            data->state.headersize*2);
+                             data->state.headersize*2);
       hbufp_index = k->hbufp - data->state.headerbuff;
       newbuff = realloc(data->state.headerbuff, newsize);
       if(!newbuff) {
-       failf (data, "Failed to alloc memory for big header!");
-       return CURLE_OUT_OF_MEMORY;
+        failf (data, "Failed to alloc memory for big header!");
+        return CURLE_OUT_OF_MEMORY;
       }
       data->state.headersize= newsize;
       data->state.headerbuff = newbuff;
@@ -790,18 +790,18 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
     if(!k->headerline) {
       /* the first read header */
       if((k->hbuflen>5) &&
-        !checkhttpprefix(data, data->state.headerbuff)) {
-       /* this is not the beginning of a HTTP first header line */
-       k->header = FALSE;
-       if(*nread)
-         /* since there's more, this is a partial bad header */
-         k->badheader = HEADER_PARTHEADER;
-       else {
-         /* this was all we read so it's all a bad header */
-         k->badheader = HEADER_ALLBAD;
-         *nread = (ssize_t)rest_length;
-       }
-       break;
+         !checkhttpprefix(data, data->state.headerbuff)) {
+        /* this is not the beginning of a HTTP first header line */
+        k->header = FALSE;
+        if(*nread)
+          /* since there's more, this is a partial bad header */
+          k->badheader = HEADER_PARTHEADER;
+        else {
+          /* this was all we read so it's all a bad header */
+          k->badheader = HEADER_ALLBAD;
+          *nread = (ssize_t)rest_length;
+        }
+        break;
       }
     }
 
@@ -813,62 +813,62 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
 
 #ifdef CURL_DOES_CONVERSIONS
       if(0x0d == *k->p) {
-       *k->p = '\r'; /* replace with CR in host encoding */
-       k->p++;       /* pass the CR byte */
+        *k->p = '\r'; /* replace with CR in host encoding */
+        k->p++;       /* pass the CR byte */
       }
       if(0x0a == *k->p) {
-       *k->p = '\n'; /* replace with LF in host encoding */
-       k->p++;       /* pass the LF byte */
+        *k->p = '\n'; /* replace with LF in host encoding */
+        k->p++;       /* pass the LF byte */
       }
 #else
       if('\r' == *k->p)
-       k->p++; /* pass the \r byte */
+        k->p++; /* pass the \r byte */
       if('\n' == *k->p)
-       k->p++; /* pass the \n byte */
+        k->p++; /* pass the \n byte */
 #endif /* CURL_DOES_CONVERSIONS */
 
       if(100 <= k->httpcode && 199 >= k->httpcode) {
-       /*
-        * We have made a HTTP PUT or POST and this is 1.1-lingo
-        * that tells us that the server is OK with this and ready
-        * to receive the data.
-        * However, we'll get more headers now so we must get
-        * back into the header-parsing state!
-        */
-       k->header = TRUE;
-       k->headerline = 0; /* restart the header line counter */
-
-       /* if we did wait for this do enable write now! */
-       if(k->exp100) {
-         k->exp100 = EXP100_SEND_DATA;
-         k->keepon |= KEEP_WRITE;
-       }
+        /*
+         * We have made a HTTP PUT or POST and this is 1.1-lingo
+         * that tells us that the server is OK with this and ready
+         * to receive the data.
+         * However, we'll get more headers now so we must get
+         * back into the header-parsing state!
+         */
+        k->header = TRUE;
+        k->headerline = 0; /* restart the header line counter */
+
+        /* if we did wait for this do enable write now! */
+        if(k->exp100) {
+          k->exp100 = EXP100_SEND_DATA;
+          k->keepon |= KEEP_WRITE;
+        }
       }
       else {
-       k->header = FALSE; /* no more header to parse! */
-
-       if((k->size == -1) && !k->chunk && !conn->bits.close &&
-          (conn->httpversion >= 11) ) {
-         /* On HTTP 1.1, when connection is not to get closed, but no
-            Content-Length nor Content-Encoding chunked have been
-            received, according to RFC2616 section 4.4 point 5, we
-            assume that the server will close the connection to
-            signal the end of the document. */
-         infof(data, "no chunk, no close, no size. Assume close to "
-               "signal end\n");
-         conn->bits.close = TRUE;
-       }
+        k->header = FALSE; /* no more header to parse! */
+
+        if((k->size == -1) && !k->chunk && !conn->bits.close &&
+           (conn->httpversion >= 11) ) {
+          /* On HTTP 1.1, when connection is not to get closed, but no
+             Content-Length nor Content-Encoding chunked have been
+             received, according to RFC2616 section 4.4 point 5, we
+             assume that the server will close the connection to
+             signal the end of the document. */
+          infof(data, "no chunk, no close, no size. Assume close to "
+                "signal end\n");
+          conn->bits.close = TRUE;
+        }
       }
 
       if(417 == k->httpcode) {
-       /*
-        * we got: "417 Expectation Failed" this means:
-        * we have made a HTTP call and our Expect Header
-        * seems to cause a problem => abort the write operations
-        * (or prevent them from starting).
-        */
-       k->exp100 = EXP100_FAILED;
-       k->keepon &= ~KEEP_WRITE;
+        /*
+         * we got: "417 Expectation Failed" this means:
+         * we have made a HTTP call and our Expect Header
+         * seems to cause a problem => abort the write operations
+         * (or prevent them from starting).
+         */
+        k->exp100 = EXP100_FAILED;
+        k->keepon &= ~KEEP_WRITE;
       }
 
       /*
@@ -876,108 +876,108 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
        * up and return an error.
        */
       if(Curl_http_should_fail(conn)) {
-       failf (data, "The requested URL returned error: %d",
-              k->httpcode);
-       return CURLE_HTTP_RETURNED_ERROR;
+        failf (data, "The requested URL returned error: %d",
+               k->httpcode);
+        return CURLE_HTTP_RETURNED_ERROR;
       }
 
       /* now, only output this if the header AND body are requested:
        */
       writetype = CLIENTWRITE_HEADER;
       if(data->set.include_header)
-       writetype |= CLIENTWRITE_BODY;
+        writetype |= CLIENTWRITE_BODY;
 
       headerlen = k->p - data->state.headerbuff;
 
       result = Curl_client_write(conn, writetype,
-                                data->state.headerbuff,
-                                headerlen);
+                                 data->state.headerbuff,
+                                 headerlen);
       if(result)
-       return result;
+        return result;
 
       data->info.header_size += (long)headerlen;
       data->req.headerbytecount += (long)headerlen;
 
       data->req.deductheadercount =
-       (100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
+        (100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
 
       if(data->state.resume_from &&
-        (data->set.httpreq==HTTPREQ_GET) &&
-        (k->httpcode == 416)) {
-       /* "Requested Range Not Satisfiable" */
-       *stop_reading = TRUE;
+         (data->set.httpreq==HTTPREQ_GET) &&
+         (k->httpcode == 416)) {
+        /* "Requested Range Not Satisfiable" */
+        *stop_reading = TRUE;
       }
 
       if(!*stop_reading) {
-       /* Curl_http_auth_act() checks what authentication methods
-        * that are available and decides which one (if any) to
-        * use. It will set 'newurl' if an auth method was picked. */
-       result = Curl_http_auth_act(conn);
-
-       if(result)
-         return result;
-
-       if(conn->bits.rewindaftersend) {
-         /* We rewind after a complete send, so thus we continue
-            sending now */
-         infof(data, "Keep sending data to get tossed away!\n");
-         k->keepon |= KEEP_WRITE;
-       }
+        /* Curl_http_auth_act() checks what authentication methods
+         * that are available and decides which one (if any) to
+         * use. It will set 'newurl' if an auth method was picked. */
+        result = Curl_http_auth_act(conn);
+
+        if(result)
+          return result;
+
+        if(conn->bits.rewindaftersend) {
+          /* We rewind after a complete send, so thus we continue
+             sending now */
+          infof(data, "Keep sending data to get tossed away!\n");
+          k->keepon |= KEEP_WRITE;
+        }
       }
 
       if(!k->header) {
-       /*
-        * really end-of-headers.
-        *
-        * If we requested a "no body", this is a good time to get
-        * out and return home.
-        */
-       if(data->set.opt_no_body)
-         *stop_reading = TRUE;
-       else {
-         /* If we know the expected size of this document, we set the
-            maximum download size to the size of the expected
-            document or else, we won't know when to stop reading!
-
-            Note that we set the download maximum even if we read a
-            "Connection: close" header, to make sure that
-            "Content-Length: 0" still prevents us from attempting to
-            read the (missing) response-body.
-         */
-         /* According to RFC2616 section 4.4, we MUST ignore
-            Content-Length: headers if we are now receiving data
-            using chunked Transfer-Encoding.
-         */
-         if(k->chunk)
-           k->size=-1;
-
-       }
-       if(-1 != k->size) {
-         /* We do this operation even if no_body is true, since this
-            data might be retrieved later with curl_easy_getinfo()
-            and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
-
-         Curl_pgrsSetDownloadSize(data, k->size);
-         k->maxdownload = k->size;
-       }
-       /* If max download size is *zero* (nothing) we already
-          have nothing and can safely return ok now! */
-       if(0 == k->maxdownload)
-         *stop_reading = TRUE;
-
-       if(*stop_reading) {
-         /* we make sure that this socket isn't read more now */
-         k->keepon &= ~KEEP_READ;
-       }
-
-       if(data->set.verbose)
-         Curl_debug(data, CURLINFO_HEADER_IN,
-                    k->str_start, headerlen, conn);
-       break;          /* exit header line loop */
+        /*
+         * really end-of-headers.
+         *
+         * If we requested a "no body", this is a good time to get
+         * out and return home.
+         */
+        if(data->set.opt_no_body)
+          *stop_reading = TRUE;
+        else {
+          /* If we know the expected size of this document, we set the
+             maximum download size to the size of the expected
+             document or else, we won't know when to stop reading!
+
+             Note that we set the download maximum even if we read a
+             "Connection: close" header, to make sure that
+             "Content-Length: 0" still prevents us from attempting to
+             read the (missing) response-body.
+          */
+          /* According to RFC2616 section 4.4, we MUST ignore
+             Content-Length: headers if we are now receiving data
+             using chunked Transfer-Encoding.
+          */
+          if(k->chunk)
+            k->size=-1;
+
+        }
+        if(-1 != k->size) {
+          /* We do this operation even if no_body is true, since this
+             data might be retrieved later with curl_easy_getinfo()
+             and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
+
+          Curl_pgrsSetDownloadSize(data, k->size);
+          k->maxdownload = k->size;
+        }
+        /* If max download size is *zero* (nothing) we already
+           have nothing and can safely return ok now! */
+        if(0 == k->maxdownload)
+          *stop_reading = TRUE;
+
+        if(*stop_reading) {
+          /* we make sure that this socket isn't read more now */
+          k->keepon &= ~KEEP_READ;
+        }
+
+        if(data->set.verbose)
+          Curl_debug(data, CURLINFO_HEADER_IN,
+                     k->str_start, headerlen, conn);
+        break;          /* exit header line loop */
       }
 
       /* We continue reading headers, so reset the line-based
-        header parsing variables hbufp && hbuflen */
+         header parsing variables hbufp && hbuflen */
       k->hbufp = data->state.headerbuff;
       k->hbuflen = 0;
       continue;
@@ -989,7 +989,7 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
 
     if(!k->headerline++) {
       /* This is the first header, it MUST be the error code line
-        or else we consider this to be the body right away! */
+         or else we consider this to be the body right away! */
       int httpversion_major;
       int nc;
 #ifdef CURL_DOES_CONVERSIONS
@@ -998,131 +998,131 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
       CURLcode res;
       char scratch[SCRATCHSIZE+1]; /* "HTTP/major.minor 123" */
       /* We can't really convert this yet because we
-        don't know if it's the 1st header line or the body.
-        So we do a partial conversion into a scratch area,
-        leaving the data at k->p as-is.
+         don't know if it's the 1st header line or the body.
+         So we do a partial conversion into a scratch area,
+         leaving the data at k->p as-is.
       */
       strncpy(&scratch[0], k->p, SCRATCHSIZE);
       scratch[SCRATCHSIZE] = 0; /* null terminate */
       res = Curl_convert_from_network(data,
-                                     &scratch[0],
-                                     SCRATCHSIZE);
+                                      &scratch[0],
+                                      SCRATCHSIZE);
       if(CURLE_OK != res) {
-       /* Curl_convert_from_network calls failf if unsuccessful */
-       return res;
+        /* Curl_convert_from_network calls failf if unsuccessful */
+        return res;
       }
 #else
 #define HEADER1 k->p /* no conversion needed, just use k->p */
 #endif /* CURL_DOES_CONVERSIONS */
 
       nc = sscanf(HEADER1,
-                 " HTTP/%d.%d %3d",
-                 &httpversion_major,
-                 &conn->httpversion,
-                 &k->httpcode);
+                  " HTTP/%d.%d %3d",
+                  &httpversion_major,
+                  &conn->httpversion,
+                  &k->httpcode);
       if(nc==3) {
-       conn->httpversion += 10 * httpversion_major;
+        conn->httpversion += 10 * httpversion_major;
       }
       else {
-       /* this is the real world, not a Nirvana
-          NCSA 1.5.x returns this crap when asked for HTTP/1.1
-       */
-       nc=sscanf(HEADER1, " HTTP %3d", &k->httpcode);
-       conn->httpversion = 10;
-
-       /* If user has set option HTTP200ALIASES,
-          compare header line against list of aliases
-       */
-       if(!nc) {
-         if(checkhttpprefix(data, k->p)) {
-           nc = 1;
-           k->httpcode = 200;
-           conn->httpversion = 10;
-         }
-       }
+        /* this is the real world, not a Nirvana
+           NCSA 1.5.x returns this crap when asked for HTTP/1.1
+        */
+        nc=sscanf(HEADER1, " HTTP %3d", &k->httpcode);
+        conn->httpversion = 10;
+
+        /* If user has set option HTTP200ALIASES,
+           compare header line against list of aliases
+        */
+        if(!nc) {
+          if(checkhttpprefix(data, k->p)) {
+            nc = 1;
+            k->httpcode = 200;
+            conn->httpversion = 10;
+          }
+        }
       }
 
       if(nc) {
-       data->info.httpcode = k->httpcode;
-       data->info.httpversion = conn->httpversion;
+        data->info.httpcode = k->httpcode;
+        data->info.httpversion = conn->httpversion;
         if (!data->state.httpversion ||
             data->state.httpversion > conn->httpversion)
           /* store the lowest server version we encounter */
           data->state.httpversion = conn->httpversion;
 
-       /*
-        * This code executes as part of processing the header.  As a
-        * result, it's not totally clear how to interpret the
-        * response code yet as that depends on what other headers may
-        * be present.  401 and 407 may be errors, but may be OK
-        * depending on how authentication is working.  Other codes
-        * are definitely errors, so give up here.
-        */
-       if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
-          ((k->httpcode != 401) || !conn->bits.user_passwd) &&
-          ((k->httpcode != 407) || !conn->bits.proxy_user_passwd) ) {
-
-         if(data->state.resume_from &&
-            (data->set.httpreq==HTTPREQ_GET) &&
-            (k->httpcode == 416)) {
-           /* "Requested Range Not Satisfiable", just proceed and
-              pretend this is no error */
-         }
-         else {
-           /* serious error, go home! */
-           failf (data, "The requested URL returned error: %d",
-                  k->httpcode);
-           return CURLE_HTTP_RETURNED_ERROR;
-         }
-       }
-
-       if(conn->httpversion == 10) {
-         /* Default action for HTTP/1.0 must be to close, unless
-            we get one of those fancy headers that tell us the
-            server keeps it open for us! */
-         infof(data, "HTTP 1.0, assume close after body\n");
-         conn->bits.close = TRUE;
-       }
-       else if(conn->httpversion >= 11 &&
-               !conn->bits.close) {
-         /* If HTTP version is >= 1.1 and connection is persistent
-            server supports pipelining. */
-         DEBUGF(infof(data,
-                      "HTTP 1.1 or later with persistent connection, "
-                      "pipelining supported\n"));
-         conn->server_supports_pipelining = TRUE;
-       }
-
-       switch(k->httpcode) {
-       case 204:
-         /* (quote from RFC2616, section 10.2.5): The server has
-          * fulfilled the request but does not need to return an
-          * entity-body ... The 204 response MUST NOT include a
-          * message-body, and thus is always terminated by the first
-          * empty line after the header fields. */
-         /* FALLTHROUGH */
-       case 416: /* Requested Range Not Satisfiable, it has the
-                    Content-Length: set as the "real" document but no
-                    actual response is sent. */
-       case 304:
-         /* (quote from RFC2616, section 10.3.5): The 304 response
-          * MUST NOT contain a message-body, and thus is always
-          * terminated by the first empty line after the header
-          * fields.  */
+        /*
+         * This code executes as part of processing the header.  As a
+         * result, it's not totally clear how to interpret the
+         * response code yet as that depends on what other headers may
+         * be present.  401 and 407 may be errors, but may be OK
+         * depending on how authentication is working.  Other codes
+         * are definitely errors, so give up here.
+         */
+        if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
+           ((k->httpcode != 401) || !conn->bits.user_passwd) &&
+           ((k->httpcode != 407) || !conn->bits.proxy_user_passwd) ) {
+
+          if(data->state.resume_from &&
+             (data->set.httpreq==HTTPREQ_GET) &&
+             (k->httpcode == 416)) {
+            /* "Requested Range Not Satisfiable", just proceed and
+               pretend this is no error */
+          }
+          else {
+            /* serious error, go home! */
+            failf (data, "The requested URL returned error: %d",
+                   k->httpcode);
+            return CURLE_HTTP_RETURNED_ERROR;
+          }
+        }
+
+        if(conn->httpversion == 10) {
+          /* Default action for HTTP/1.0 must be to close, unless
+             we get one of those fancy headers that tell us the
+             server keeps it open for us! */
+          infof(data, "HTTP 1.0, assume close after body\n");
+          conn->bits.close = TRUE;
+        }
+        else if(conn->httpversion >= 11 &&
+                !conn->bits.close) {
+          /* If HTTP version is >= 1.1 and connection is persistent
+             server supports pipelining. */
+          DEBUGF(infof(data,
+                       "HTTP 1.1 or later with persistent connection, "
+                       "pipelining supported\n"));
+          conn->server_supports_pipelining = TRUE;
+        }
+
+        switch(k->httpcode) {
+        case 204:
+          /* (quote from RFC2616, section 10.2.5): The server has
+           * fulfilled the request but does not need to return an
+           * entity-body ... The 204 response MUST NOT include a
+           * message-body, and thus is always terminated by the first
+           * empty line after the header fields. */
+          /* FALLTHROUGH */
+        case 416: /* Requested Range Not Satisfiable, it has the
+                     Content-Length: set as the "real" document but no
+                     actual response is sent. */
+        case 304:
+          /* (quote from RFC2616, section 10.3.5): The 304 response
+           * MUST NOT contain a message-body, and thus is always
+           * terminated by the first empty line after the header
+           * fields.  */
           if(data->set.timecondition)
             data->info.timecond = TRUE;
-         k->size=0;
-         k->maxdownload=0;
-         k->ignorecl = TRUE; /* ignore Content-Length headers */
-         break;
-       default:
-         /* nothing */
-         break;
-       }
+          k->size=0;
+          k->maxdownload=0;
+          k->ignorecl = TRUE; /* ignore Content-Length headers */
+          break;
+        default:
+          /* nothing */
+          break;
+        }
       }
       else {
-       k->header = FALSE;   /* this is not a header line */
-       break;
+        k->header = FALSE;   /* this is not a header line */
+        break;
       }
     }
 
@@ -1143,44 +1143,44 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
        checkprefix("Content-Length:", k->p)) {
       curl_off_t contentlength = curlx_strtoofft(k->p+15, NULL, 10);
       if(data->set.max_filesize &&
-        contentlength > data->set.max_filesize) {
-       failf(data, "Maximum file size exceeded");
-       return CURLE_FILESIZE_EXCEEDED;
+         contentlength > data->set.max_filesize) {
+        failf(data, "Maximum file size exceeded");
+        return CURLE_FILESIZE_EXCEEDED;
       }
       if(contentlength >= 0) {
-       k->size = contentlength;
-       k->maxdownload = k->size;
-       /* we set the progress download size already at this point
-          just to make it easier for apps/callbacks to extract this
-          info as soon as possible */
-       Curl_pgrsSetDownloadSize(data, k->size);
+        k->size = contentlength;
+        k->maxdownload = k->size;
+        /* we set the progress download size already at this point
+           just to make it easier for apps/callbacks to extract this
+           info as soon as possible */
+        Curl_pgrsSetDownloadSize(data, k->size);
       }
       else {
-       /* Negative Content-Length is really odd, and we know it
-          happens for example when older Apache servers send large
-          files */
-       conn->bits.close = TRUE;
-       infof(data, "Negative content-length: %" FORMAT_OFF_T
-             ", closing after transfer\n", contentlength);
+        /* Negative Content-Length is really odd, and we know it
+           happens for example when older Apache servers send large
+           files */
+        conn->bits.close = TRUE;
+        infof(data, "Negative content-length: %" FORMAT_OFF_T
+              ", closing after transfer\n", contentlength);
       }
     }
     /* check for Content-Type: header lines to get the MIME-type */
     else if(checkprefix("Content-Type:", k->p)) {
       char *contenttype = Curl_copy_header_value(k->p);
       if (!contenttype)
-       return CURLE_OUT_OF_MEMORY;
+        return CURLE_OUT_OF_MEMORY;
       if (!*contenttype)
-       /* ignore empty data */
-       free(contenttype);
+        /* ignore empty data */
+        free(contenttype);
       else {
-       Curl_safefree(data->info.contenttype);
-       data->info.contenttype = contenttype;
+        Curl_safefree(data->info.contenttype);
+        data->info.contenttype = contenttype;
       }
     }
     else if((conn->httpversion == 10) &&
-           conn->bits.httpproxy &&
-           Curl_compareheader(k->p,
-                              "Proxy-Connection:", "keep-alive")) {
+            conn->bits.httpproxy &&
+            Curl_compareheader(k->p,
+                               "Proxy-Connection:", "keep-alive")) {
       /*
        * When a HTTP/1.0 reply comes when using a proxy, the
        * 'Proxy-Connection: keep-alive' line tells us the
@@ -1191,9 +1191,9 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
       infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
     }
     else if((conn->httpversion == 11) &&
-           conn->bits.httpproxy &&
-           Curl_compareheader(k->p,
-                              "Proxy-Connection:", "close")) {
+            conn->bits.httpproxy &&
+            Curl_compareheader(k->p,
+                               "Proxy-Connection:", "close")) {
       /*
        * We get a HTTP/1.1 response from a proxy and it says it'll
        * close down after this transfer.
@@ -1202,7 +1202,7 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
       infof(data, "HTTP/1.1 proxy connection set close!\n");
     }
     else if((conn->httpversion == 10) &&
-           Curl_compareheader(k->p, "Connection:", "keep-alive")) {
+            Curl_compareheader(k->p, "Connection:", "keep-alive")) {
       /*
        * A HTTP/1.0 reply with the 'Connection: keep-alive' line
        * tells us the connection will be kept alive for our
@@ -1222,7 +1222,7 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
       conn->bits.close = TRUE; /* close when done */
     }
     else if(Curl_compareheader(k->p,
-                              "Transfer-Encoding:", "chunked")) {
+                               "Transfer-Encoding:", "chunked")) {
       /*
        * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
        * means that the server will send a series of "chunks". Each
@@ -1238,7 +1238,7 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
     }
 
     else if(checkprefix("Trailer:", k->p) ||
-           checkprefix("Trailers:", k->p)) {
+            checkprefix("Trailers:", k->p)) {
       /*
        * This test helps Curl_httpchunk_read() to determine to look
        * for well formed trailers after the zero chunksize record. In
@@ -1251,7 +1251,7 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
     }
 
     else if(checkprefix("Content-Encoding:", k->p) &&
-           data->set.str[STRING_ENCODING]) {
+            data->set.str[STRING_ENCODING]) {
       /*
        * Process Content-Encoding. Look for the values: identity,
        * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
@@ -1268,94 +1268,94 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
 
       /* Record the content-encoding for later use */
       if(checkprefix("identity", start))
-       k->content_encoding = IDENTITY;
+        k->content_encoding = IDENTITY;
       else if(checkprefix("deflate", start))
-       k->content_encoding = DEFLATE;
+        k->content_encoding = DEFLATE;
       else if(checkprefix("gzip", start)
-             || checkprefix("x-gzip", start))
-       k->content_encoding = GZIP;
+              || checkprefix("x-gzip", start))
+        k->content_encoding = GZIP;
       else if(checkprefix("compress", start)
-             || checkprefix("x-compress", start))
-       k->content_encoding = COMPRESS;
+              || checkprefix("x-compress", start))
+        k->content_encoding = COMPRESS;
     }
     else if(checkprefix("Content-Range:", k->p)) {
       /* Content-Range: bytes [num]-
-        Content-Range: bytes: [num]-
-        Content-Range: [num]-
+         Content-Range: bytes: [num]-
+         Content-Range: [num]-
 
-        The second format was added since Sun's webserver
-        JavaWebServer/1.1.1 obviously sends the header this way!
-        The third added since some servers use that!
+         The second format was added since Sun's webserver
+         JavaWebServer/1.1.1 obviously sends the header this way!
+         The third added since some servers use that!
       */
 
       char *ptr = k->p + 14;
 
       /* Move forward until first digit */
       while(*ptr && !ISDIGIT(*ptr))
-       ptr++;
+        ptr++;
 
       k->offset = curlx_strtoofft(ptr, NULL, 10);
 
       if(data->state.resume_from == k->offset)
-       /* we asked for a resume and we got it */
-       k->content_range = TRUE;
+        /* we asked for a resume and we got it */
+        k->content_range = TRUE;
     }
 #if !defined(CURL_DISABLE_COOKIES)
     else if(data->cookies &&
-           checkprefix("Set-Cookie:", k->p)) {
+            checkprefix("Set-Cookie:", k->p)) {
       Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
-                     CURL_LOCK_ACCESS_SINGLE);
+                      CURL_LOCK_ACCESS_SINGLE);
       Curl_cookie_add(data,
-                     data->cookies, TRUE, k->p+11,
-                     /* If there is a custom-set Host: name, use it
-                        here, or else use real peer host name. */
-                     conn->allocptr.cookiehost?
-                     conn->allocptr.cookiehost:conn->host.name,
-                     data->state.path);
+                      data->cookies, TRUE, k->p+11,
+                      /* If there is a custom-set Host: name, use it
+                         here, or else use real peer host name. */
+                      conn->allocptr.cookiehost?
+                      conn->allocptr.cookiehost:conn->host.name,
+                      data->state.path);
       Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
     }
 #endif
     else if(checkprefix("Last-Modified:", k->p) &&
-           (data->set.timecondition || data->set.get_filetime) ) {
+            (data->set.timecondition || data->set.get_filetime) ) {
       time_t secs=time(NULL);
       k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
-                                 &secs);
+                                  &secs);
       if(data->set.get_filetime)
-       data->info.filetime = (long)k->timeofdoc;
+        data->info.filetime = (long)k->timeofdoc;
     }
     else if((checkprefix("WWW-Authenticate:", k->p) &&
-            (401 == k->httpcode)) ||
-           (checkprefix("Proxy-authenticate:", k->p) &&
-            (407 == k->httpcode))) {
+             (401 == k->httpcode)) ||
+            (checkprefix("Proxy-authenticate:", k->p) &&
+             (407 == k->httpcode))) {
       result = Curl_http_input_auth(conn, k->httpcode, k->p);
       if(result)
-       return result;
+        return result;
     }
     else if((k->httpcode >= 300 && k->httpcode < 400) &&
-           checkprefix("Location:", k->p)) {
+            checkprefix("Location:", k->p)) {
       /* this is the URL that the server advises us to use instead */
       char *location = Curl_copy_header_value(k->p);
       if (!location)
-       return CURLE_OUT_OF_MEMORY;
+        return CURLE_OUT_OF_MEMORY;
       if (!*location)
-       /* ignore empty data */
-       free(location);
+        /* ignore empty data */
+        free(location);
       else {
-       DEBUGASSERT(!data->req.location);
-       data->req.location = location;
-
-       if(data->set.http_follow_location) {
-         DEBUGASSERT(!data->req.newurl);
-         data->req.newurl = strdup(data->req.location); /* clone */
-         if(!data->req.newurl)
-           return CURLE_OUT_OF_MEMORY;
-
-         /* some cases of POST and PUT etc needs to rewind the data
-            stream at this point */
-         result = Curl_http_perhapsrewind(conn);
-         if(result)
-           return result;
-       }
+        DEBUGASSERT(!data->req.location);
+        data->req.location = location;
+
+        if(data->set.http_follow_location) {
+          DEBUGASSERT(!data->req.newurl);
+          data->req.newurl = strdup(data->req.location); /* clone */
+          if(!data->req.newurl)
+            return CURLE_OUT_OF_MEMORY;
+
+          /* some cases of POST and PUT etc needs to rewind the data
+             stream at this point */
+          result = Curl_http_perhapsrewind(conn);
+          if(result)
+            return result;
+        }
       }
     }
 
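This hunk reindents the header parsers for Content-Encoding, Content-Range, Set-Cookie, Last-Modified, the WWW-/Proxy-Authenticate headers and Location. The Content-Range branch is deliberately tolerant: it skips ahead to the first digit so that "bytes [num]-", the broken "bytes: [num]-" variant and a bare "[num]-" all yield the same resume offset. A standalone sketch of that approach; parse_content_range_start() is a hypothetical helper, not the libcurl routine (which uses curlx_strtoofft):

/* Sketch of tolerant Content-Range parsing: skip to the first digit,
   then read the start offset.  Caller guarantees the "Content-Range:"
   prefix (14 characters) is present. */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static long long parse_content_range_start(const char *header)
{
  const char *p = header + 14;         /* skip "Content-Range:" */

  while(*p && !isdigit((unsigned char)*p))
    p++;                                /* move forward until first digit */

  return strtoll(p, NULL, 10);
}

int main(void)
{
  /* all three header variants mentioned above resolve to offset 500 */
  printf("%lld\n", parse_content_range_start("Content-Range: bytes 500-999/1234"));
  printf("%lld\n", parse_content_range_start("Content-Range: bytes: 500-999/1234"));
  printf("%lld\n", parse_content_range_start("Content-Range: 500-1233/1234"));
  return 0;
}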
@@ -1369,7 +1369,7 @@ static CURLcode readwrite_http_headers(struct SessionHandle *data,
 
     if(data->set.verbose)
       Curl_debug(data, CURLINFO_HEADER_IN,
-                k->p, (size_t)k->hbuflen, conn);
+                 k->p, (size_t)k->hbuflen, conn);
 
     result = Curl_client_write(conn, writetype, k->p, k->hbuflen);
     if(result)
@@ -1423,45 +1423,45 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
       data->req.upload_fromhere = k->uploadbuf;
 
       if(!k->upload_done) {
-       /* HTTP pollution, this should be written nicer to become more
-          protocol agnostic. */
-       int fillcount;
-
-       if((k->exp100 == EXP100_SENDING_REQUEST) &&
-          (data->state.proto.http->sending == HTTPSEND_BODY)) {
-         /* If this call is to send body data, we must take some action:
-            We have sent off the full HTTP 1.1 request, and we shall now
-            go into the Expect: 100 state and await such a header */
-         k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
-         k->keepon &= ~KEEP_WRITE;         /* disable writing */
-         k->start100 = Curl_tvnow();       /* timeout count starts now */
-         *didwhat &= ~KEEP_WRITE;  /* we didn't write anything actually */
-         break;
-       }
-
-       result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
-       if(result)
-         return result;
-
-       nread = (ssize_t)fillcount;
+        /* HTTP pollution, this should be written nicer to become more
+           protocol agnostic. */
+        int fillcount;
+
+        if((k->exp100 == EXP100_SENDING_REQUEST) &&
+           (data->state.proto.http->sending == HTTPSEND_BODY)) {
+          /* If this call is to send body data, we must take some action:
+             We have sent off the full HTTP 1.1 request, and we shall now
+             go into the Expect: 100 state and await such a header */
+          k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
+          k->keepon &= ~KEEP_WRITE;         /* disable writing */
+          k->start100 = Curl_tvnow();       /* timeout count starts now */
+          *didwhat &= ~KEEP_WRITE;  /* we didn't write anything actually */
+          break;
+        }
+
+        result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
+        if(result)
+          return result;
+
+        nread = (ssize_t)fillcount;
       }
       else
-       nread = 0; /* we're done uploading/reading */
+        nread = 0; /* we're done uploading/reading */
 
       if(!nread && (k->keepon & KEEP_WRITE_PAUSE)) {
-       /* this is a paused transfer */
-       break;
+        /* this is a paused transfer */
+        break;
       }
       else if(nread<=0) {
-       /* done */
-       k->keepon &= ~KEEP_WRITE; /* we're done writing */
-
-       if(conn->bits.rewindaftersend) {
-         result = Curl_readrewind(conn);
-         if(result)
-           return result;
-       }
-       break;
+        /* done */
+        k->keepon &= ~KEEP_WRITE; /* we're done writing */
+
+        if(conn->bits.rewindaftersend) {
+          result = Curl_readrewind(conn);
+          if(result)
+            return result;
+        }
+        break;
       }
 
       /* store number of bytes available for upload */
@@ -1472,58 +1472,58 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
       /* always convert if we're FTPing in ASCII mode */
       if((data->set.crlf) || (data->set.prefer_ascii))
 #else
-       if(data->set.crlf)
+        if(data->set.crlf)
 #endif /* CURL_DO_LINEEND_CONV */
-       {
-         if(data->state.scratch == NULL)
-           data->state.scratch = malloc(2*BUFSIZE);
-         if(data->state.scratch == NULL) {
-           failf (data, "Failed to alloc scratch buffer!");
-           return CURLE_OUT_OF_MEMORY;
-         }
-         /*
-          * ASCII/EBCDIC Note: This is presumably a text (not binary)
-          * transfer so the data should already be in ASCII.
-          * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
-          * must be used instead of the escape sequences \r & \n.
-          */
-         for(i = 0, si = 0; i < nread; i++, si++) {
-           if(data->req.upload_fromhere[i] == 0x0a) {
-             data->state.scratch[si++] = 0x0d;
-             data->state.scratch[si] = 0x0a;
-             if(!data->set.crlf) {
-               /* we're here only because FTP is in ASCII mode...
-                  bump infilesize for the LF we just added */
-               data->set.infilesize++;
-             }
-           }
-           else
-             data->state.scratch[si] = data->req.upload_fromhere[i];
-         }
-         if(si != nread) {
-           /* only perform the special operation if we really did replace
-              anything */
-           nread = si;
-
-           /* upload from the new (replaced) buffer instead */
-           data->req.upload_fromhere = data->state.scratch;
-
-           /* set the new amount too */
-           data->req.upload_present = nread;
-         }
-       }
+        {
+          if(data->state.scratch == NULL)
+            data->state.scratch = malloc(2*BUFSIZE);
+          if(data->state.scratch == NULL) {
+            failf (data, "Failed to alloc scratch buffer!");
+            return CURLE_OUT_OF_MEMORY;
+          }
+          /*
+           * ASCII/EBCDIC Note: This is presumably a text (not binary)
+           * transfer so the data should already be in ASCII.
+           * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
+           * must be used instead of the escape sequences \r & \n.
+           */
+          for(i = 0, si = 0; i < nread; i++, si++) {
+            if(data->req.upload_fromhere[i] == 0x0a) {
+              data->state.scratch[si++] = 0x0d;
+              data->state.scratch[si] = 0x0a;
+              if(!data->set.crlf) {
+                /* we're here only because FTP is in ASCII mode...
+                   bump infilesize for the LF we just added */
+                data->set.infilesize++;
+              }
+            }
+            else
+              data->state.scratch[si] = data->req.upload_fromhere[i];
+          }
+          if(si != nread) {
+            /* only perform the special operation if we really did replace
+               anything */
+            nread = si;
+
+            /* upload from the new (replaced) buffer instead */
+            data->req.upload_fromhere = data->state.scratch;
+
+            /* set the new amount too */
+            data->req.upload_present = nread;
+          }
+        }
     } /* if 0 == data->req.upload_present */
     else {
       /* We have a partial buffer left from a previous "round". Use
-        that instead of reading more data */
+         that instead of reading more data */
     }
 
     /* write to socket (send away data) */
     result = Curl_write(conn,
-                       conn->writesockfd,     /* socket to send to */
-                       data->req.upload_fromhere, /* buffer pointer */
-                       data->req.upload_present,  /* buffer size */
-                       &bytes_written);       /* actually send away */
+                        conn->writesockfd,     /* socket to send to */
+                        data->req.upload_fromhere, /* buffer pointer */
+                        data->req.upload_present,  /* buffer size */
+                        &bytes_written);       /* actually send away */
 
     if(result)
       return result;
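The hunk above reindents the LF to CRLF conversion done before uploading in FTP ASCII mode or when CURLOPT_CRLF (data->set.crlf) is set: each 0x0a in the outgoing data is expanded to 0x0d 0x0a in a scratch buffer of twice the size, and the upload pointer is redirected to the scratch copy only if something was actually replaced. A minimal sketch of the expansion with a hypothetical crlf_expand() helper:

/* Sketch of LF -> CRLF expansion into a caller-provided scratch buffer
   that must hold at least 2*len bytes (worst case: every byte is LF).
   Returns the new length; a return equal to len means nothing changed. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static size_t crlf_expand(const char *in, size_t len, char *scratch)
{
  size_t i, si;

  for(i = 0, si = 0; i < len; i++, si++) {
    if(in[i] == 0x0a) {
      scratch[si++] = 0x0d;             /* insert CR before the LF */
      scratch[si] = 0x0a;
    }
    else
      scratch[si] = in[i];
  }
  return si;
}

int main(void)
{
  const char body[] = "line1\nline2\n";
  char scratch[2 * sizeof(body)];
  size_t outlen = crlf_expand(body, strlen(body), scratch);

  fwrite(scratch, 1, outlen, stdout);   /* prints CRLF-terminated lines */
  return 0;
}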
@@ -1531,7 +1531,7 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
     if(data->set.verbose)
       /* show the data before we change the pointer upload_fromhere */
       Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
-                (size_t)bytes_written, conn);
+                 (size_t)bytes_written, conn);
 
     if(data->req.upload_present != bytes_written) {
       /* we only wrote a part of the buffer (if anything), deal with it! */
@@ -1540,7 +1540,7 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
       data->req.upload_present -= bytes_written;
 
       /* advance the pointer where to find the buffer when the next send
-        is to happen */
+         is to happen */
       data->req.upload_fromhere += bytes_written;
     }
     else {
@@ -1549,8 +1549,8 @@ static CURLcode readwrite_upload(struct SessionHandle *data,
       data->req.upload_present = 0; /* no more bytes left */
 
       if(k->upload_done) {
-       /* switch off writing, we're done! */
-       k->keepon &= ~KEEP_WRITE; /* we're done writing */
+        /* switch off writing, we're done! */
+        k->keepon &= ~KEEP_WRITE; /* we're done writing */
       }
     }
 
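The hunks above handle short writes from Curl_write(): when the socket accepted only part of upload_present, the remaining count is reduced and upload_fromhere advanced so the next round resumes where the last send stopped, and KEEP_WRITE is cleared only once everything is out and upload_done is set. A sketch of the same pointer/length bookkeeping around a plain POSIX write(); send_all() is illustrative, not libcurl API, and libcurl itself returns to its event loop rather than looping like this:

/* Sketch of short-write bookkeeping: an advancing pointer and a
   shrinking length until the whole buffer has been accepted. */
#include <string.h>
#include <unistd.h>

static int send_all(int fd, const char *buf, size_t len)
{
  while(len) {
    ssize_t n = write(fd, buf, len);    /* may accept fewer bytes */
    if(n < 0)
      return -1;                        /* real code would handle EAGAIN */
    buf += n;                           /* advance past what was written */
    len -= (size_t)n;                   /* and shrink what is left */
  }
  return 0;
}

int main(void)
{
  const char msg[] = "hello\n";
  return send_all(STDOUT_FILENO, msg, strlen(msg));
}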
@@ -2097,7 +2097,7 @@ static char *concat_url(const char *base, const char *relurl)
     if(useurl[0] != '?') {
       pathsep = strrchr(protsep, '/');
       if(pathsep)
-       *pathsep=0;
+        *pathsep=0;
     }
 
     /* Check if there's any slash after the host name, and if so, remember
@@ -2115,22 +2115,22 @@ static char *concat_url(const char *base, const char *relurl)
       useurl+=2; /* just skip the "./" */
 
     while((useurl[0] == '.') &&
-         (useurl[1] == '.') &&
-         (useurl[2] == '/')) {
+          (useurl[1] == '.') &&
+          (useurl[2] == '/')) {
       level++;
       useurl+=3; /* pass the "../" */
     }
 
     if(protsep) {
       while(level--) {
-       /* cut off one more level from the right of the original URL */
-       pathsep = strrchr(protsep, '/');
-       if(pathsep)
-         *pathsep=0;
-       else {
-         *protsep=0;
-         break;
-       }
+        /* cut off one more level from the right of the original URL */
+        pathsep = strrchr(protsep, '/');
+        if(pathsep)
+          *pathsep=0;
+        else {
+          *protsep=0;
+          break;
+        }
       }
     }
   }
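The hunk above reindents the part of concat_url() that applies a relative URL to the current one: each leading "../" removes one path level from the right of the base (strrchr for the last '/'), stopping at the protocol separator when no levels remain. A simplified sketch of that stripping on a bare path string, without query-string handling:

/* Sketch of "../" resolution: count leading "../" segments in the
   relative part, then cut that many trailing levels off the base path. */
#include <stdio.h>
#include <string.h>

int main(void)
{
  char base[] = "/a/b/c/page.html";     /* path part of the base URL */
  const char *rel = "../../other.html";
  int level = 0;
  char *sep;

  /* drop the file part of the base first */
  sep = strrchr(base, '/');
  if(sep)
    *sep = 0;                           /* base is now "/a/b/c" */

  while(!strncmp(rel, "../", 3)) {
    level++;
    rel += 3;                           /* pass the "../" */
  }

  while(level--) {
    sep = strrchr(base, '/');
    if(sep)
      *sep = 0;                         /* cut one more level */
    else {
      *base = 0;                        /* nothing left of the path */
      break;
    }
  }

  printf("%s/%s\n", base, rel);         /* prints "/a/other.html" */
  return 0;
}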
@@ -2140,21 +2140,21 @@ static char *concat_url(const char *base, const char *relurl)
     pathsep = strchr(protsep, '/');
     if(pathsep) {
       /* When people use badly formatted URLs, such as
-        "http://www.url.com?dir=/home/daniel" we must not use the first
-        slash, if there's a ?-letter before it! */
+         "http://www.url.com?dir=/home/daniel" we must not use the first
+         slash, if there's a ?-letter before it! */
       char *sep = strchr(protsep, '?');
       if(sep && (sep < pathsep))
-       pathsep = sep;
+        pathsep = sep;
       *pathsep=0;
     }
     else {
       /* There was no slash. Now, since we might be operating on a badly
-        formatted URL, such as "http://www.url.com?id=2380" which doesn't
-        use a slash separator as it is supposed to, we need to check for a
-        ?-letter as well! */
+         formatted URL, such as "http://www.url.com?id=2380" which doesn't
+         use a slash separator as it is supposed to, we need to check for a
+         ?-letter as well! */
       pathsep = strchr(protsep, '?');
       if(pathsep)
-       *pathsep=0;
+        *pathsep=0;
     }
   }
 
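These hunks cover badly formatted base URLs where a '?' appears before, or instead of, the first '/' after the host name, such as "http://www.url.com?dir=/home/daniel": the path must be cut at the '?' rather than at a slash inside the query part. A small illustrative sketch of picking that cut point; host_end() is a hypothetical helper:

/* Sketch: find where the host part ends when a '?' may come before
   any '/', or when there is no '/' at all. */
#include <stdio.h>
#include <string.h>

static size_t host_end(const char *after_scheme)
{
  const char *slash = strchr(after_scheme, '/');
  const char *query = strchr(after_scheme, '?');

  if(slash && (!query || slash < query))
    return (size_t)(slash - after_scheme);
  if(query)
    return (size_t)(query - after_scheme);
  return strlen(after_scheme);          /* neither found: whole string */
}

int main(void)
{
  /* examples from the comments in the hunks above; all print 11 */
  printf("%zu\n", host_end("www.url.com?dir=/home/daniel"));
  printf("%zu\n", host_end("www.url.com?id=2380"));
  printf("%zu\n", host_end("www.url.com/path/index.html"));
  return 0;
}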
@@ -2168,7 +2168,7 @@ static char *concat_url(const char *base, const char *relurl)
   urllen = strlen(url_clone);
 
   newest = malloc( urllen + 1 + /* possible slash */
-                        newlen + 1 /* zero byte */);
+                         newlen + 1 /* zero byte */);
 
   if(!newest) {
     free(url_clone); /* don't leak this */
@@ -2267,7 +2267,7 @@ CURLcode Curl_follow(struct SessionHandle *data,
 
       newest = malloc(newlen+1); /* get memory for this */
       if (!newest)
-       return CURLE_OUT_OF_MEMORY;
+        return CURLE_OUT_OF_MEMORY;
       strcpy_url(newest, newurl); /* create a space-free URL */
 
       free(newurl); /* that was no good */