Diffstat (limited to 'crypt/sysdeps/unix')
-rw-r--r--  crypt/sysdeps/unix/Makefile         |   4
-rw-r--r--  crypt/sysdeps/unix/crypt-entry.c    | 149
-rw-r--r--  crypt/sysdeps/unix/crypt-private.h  |  60
-rw-r--r--  crypt/sysdeps/unix/crypt.c          | 120
-rw-r--r--  crypt/sysdeps/unix/crypt.h          |  71
-rw-r--r--  crypt/sysdeps/unix/crypt_util.c     | 914
-rw-r--r--  crypt/sysdeps/unix/des_impl.c       | 615
-rw-r--r--  crypt/sysdeps/unix/patchlevel.h     |  25
-rw-r--r--  crypt/sysdeps/unix/ufc-crypt.h      |  29
9 files changed, 1987 insertions, 0 deletions
diff --git a/crypt/sysdeps/unix/Makefile b/crypt/sysdeps/unix/Makefile
new file mode 100644
index 0000000000..65810482d0
--- /dev/null
+++ b/crypt/sysdeps/unix/Makefile
@@ -0,0 +1,4 @@
+ifeq ($(subdir),md5-crypt)
+libcrypt-routines += crypt crypt_util
+dont_distribute += crypt.c crypt_util.c
+endif
diff --git a/crypt/sysdeps/unix/crypt-entry.c b/crypt/sysdeps/unix/crypt-entry.c
new file mode 100644
index 0000000000..9d99c53a2b
--- /dev/null
+++ b/crypt/sysdeps/unix/crypt-entry.c
@@ -0,0 +1,149 @@
+/*
+ * UFC-crypt: ultra fast crypt(3) implementation
+ *
+ * Copyright (C) 1991, 1992, 1993, 1996, 1997 Free Software Foundation, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; see the file COPYING.LIB.  If not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * crypt entry points
+ *
+ * @(#)crypt-entry.c	1.2 12/20/96
+ *
+ */
+
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+#include <string.h>
+
+#ifndef STATIC
+#define STATIC static
+#endif
+
+#ifndef DOS
+#include "ufc-crypt.h"
+#else
+/*
+ * Thanks to greg%wind@plains.NoDak.edu (Greg W. Wettstein)
+ * for DOS patches
+ */
+#include "ufc.h"
+#endif
+#include "crypt.h"
+#include "crypt-private.h"
+
+/* Prototypes for local functions.  */
+#if __STDC__ - 0
+#ifndef __GNU_LIBRARY__
+void _ufc_clearmem (char *start, int cnt);
+#else
+#define _ufc_clearmem(start, cnt)   memset(start, 0, cnt)
+#endif
+extern char *__md5_crypt_r (const char *key, const char *salt, char *buffer,
+			    int buflen);
+extern char *__md5_crypt (const char *key, const char *salt);
+#endif
+
+/* Define our magic string to mark salt for MD5 encryption
+   replacement.  This is meant to be the same as for other MD5 based
+   encryption implementations.  */
+static const char md5_salt_prefix[] = "$1$";
+
+/* For use by the old, non-reentrant routines (crypt/encrypt/setkey)  */
+extern struct crypt_data _ufc_foobar;
+
+/*
+ * UNIX crypt function
+ */
+
+char *
+__crypt_r (key, salt, data)
+     const char *key;
+     const char *salt;
+     struct crypt_data * __restrict data;
+{
+  ufc_long res[4];
+  char ktab[9];
+  ufc_long xx = 25; /* to cope with GCC long long compiler bugs */
+
+#ifdef _LIBC
+  /* Try to find out whether we have to use MD5 encryption replacement.  */
+  if (strncmp (md5_salt_prefix, salt, sizeof (md5_salt_prefix) - 1) == 0)
+    return __md5_crypt_r (key, salt, (char *) data,
+			  sizeof (struct crypt_data));
+#endif
+
+  /*
+   * Hack DES tables according to salt
+   */
+  _ufc_setup_salt_r (salt, data);
+
+  /*
+   * Setup key schedule
+   */
+  _ufc_clearmem (ktab, (int) sizeof (ktab));
+  (void) strncpy (ktab, key, 8);
+  _ufc_mk_keytab_r (ktab, data);
+
+  /*
+   * Go for the 25 DES encryptions
+   */
+  _ufc_clearmem ((char*) res, (int) sizeof (res));
+  _ufc_doit_r (xx,  data, &res[0]);
+
+  /*
+   * Do final permutations
+   */
+  _ufc_dofinalperm_r (res, data);
+
+  /*
+   * And convert back to 6 bit ASCII
+   */
+  _ufc_output_conversion_r (res[0], res[1], salt, data);
+  return data->crypt_3_buf;
+}
+weak_alias (__crypt_r, crypt_r)
+
+char *
+crypt (key, salt)
+     const char *key;
+     const char *salt;
+{
+#ifdef _LIBC
+  /* Try to find out whether we have to use MD5 encryption replacement.  */
+  if (strncmp (md5_salt_prefix, salt, sizeof (md5_salt_prefix) - 1) == 0)
+    return __md5_crypt (key, salt);
+#endif
+
+  return __crypt_r (key, salt, &_ufc_foobar);
+}
+
+
+/*
+ * To make fcrypt users happy.
+ * They don't need to call init_des.
+ */
+#ifdef _LIBC
+weak_alias (crypt, fcrypt)
+#else
+char *
+__fcrypt (key, salt)
+     const char *key;
+     const char *salt;
+{
+  return crypt (key, salt);
+}
+#endif
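The dispatch above is the whole public story of this file: a salt that starts with the "$1$" magic prefix is routed to the MD5 replacement, anything else goes through the 25-iteration DES path. A minimal caller-side sketch, assuming a program linked against this libcrypt; the salt strings are purely illustrative:

/* Hypothetical caller illustrating the two salt forms handled above.  */
#include <stdio.h>
#include <crypt.h>

int
main (void)
{
  /* Traditional DES: two character salt, result is salt + 11 characters.  */
  printf ("%s\n", crypt ("password", "ab"));

  /* MD5 replacement: the "$1$" prefix makes crypt() forward to
     __md5_crypt() instead of the DES code above.  */
  printf ("%s\n", crypt ("password", "$1$saltsalt$"));

  return 0;
}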
diff --git a/crypt/sysdeps/unix/crypt-private.h b/crypt/sysdeps/unix/crypt-private.h
new file mode 100644
index 0000000000..130cccd373
--- /dev/null
+++ b/crypt/sysdeps/unix/crypt-private.h
@@ -0,0 +1,60 @@
+/*
+ * UFC-crypt: ultra fast crypt(3) implementation
+ *
+ * Copyright (C) 1991, 92, 93, 96, 97, 98 Free Software Foundation, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; see the file COPYING.LIB.  If not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * @(#)crypt-private.h	1.4 12/20/96
+ */
+
+/* Prototypes for local functions in libcrypt.a.  */
+
+#ifndef CRYPT_PRIVATE_H
+#define CRYPT_PRIVATE_H	1
+
+#include <features.h>
+
+/* crypt.c */
+extern void _ufc_doit_r (ufc_long itr, struct crypt_data * __restrict __data,
+			 ufc_long *res);
+
+
+/* crypt_util.c */
+extern void __init_des_r (struct crypt_data * __restrict __data);
+extern void __init_des (void);
+
+extern void _ufc_setup_salt_r (__const char *s, 
+			       struct crypt_data * __restrict __data);
+extern void _ufc_mk_keytab_r (__const char *key, 
+			      struct crypt_data * __restrict __data);
+extern void _ufc_dofinalperm_r (ufc_long *res, 
+				struct crypt_data * __restrict __data);
+extern void _ufc_output_conversion_r (ufc_long v1, ufc_long v2,
+				      __const char *salt,
+				      struct crypt_data * __restrict __data);
+
+extern void __setkey_r (__const char *__key,
+			     struct crypt_data * __restrict __data);
+extern void __encrypt_r (char * __restrict __block, int __edflag,
+			      struct crypt_data * __restrict __data);
+
+/* crypt-entry.c */
+extern char *__crypt_r (__const char *__key, __const char *__salt,
+			     struct crypt_data * __restrict __data);
+extern char *fcrypt (__const char *key, __const char *salt);
+
+#endif  /* crypt-private.h */
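The helpers declared here are meant to be called in a fixed order; __crypt_r in crypt-entry.c above is the canonical user. The sketch below restates that pipeline in isolation, assuming ufc-crypt.h, crypt.h, crypt-private.h and <string.h> are included; the wrapper name is hypothetical:

/* Sketch of the internal call order behind __crypt_r.  */
static void
des_crypt_pipeline (const char *key, const char *salt,
                    struct crypt_data * __restrict data)
{
  ufc_long res[4] = { 0, 0, 0, 0 };
  char ktab[9] = { 0 };

  _ufc_setup_salt_r (salt, data);          /* patch E expansion for this salt */
  strncpy (ktab, key, 8);
  _ufc_mk_keytab_r (ktab, data);           /* build the 16 round key schedule */
  _ufc_doit_r ((ufc_long) 25, data, res);  /* the 25 DES iterations           */
  _ufc_dofinalperm_r (res, data);          /* undo E expansion + final perm   */
  _ufc_output_conversion_r (res[0], res[1], salt, data);
  /* The result now sits in data->crypt_3_buf.  */
}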
diff --git a/crypt/sysdeps/unix/crypt.c b/crypt/sysdeps/unix/crypt.c
new file mode 100644
index 0000000000..8e2a576a70
--- /dev/null
+++ b/crypt/sysdeps/unix/crypt.c
@@ -0,0 +1,120 @@
+/*
+ * UFC-crypt: ultra fast crypt(3) implementation
+ *
+ * Copyright (C) 1991, 1992, 1993, 1996 Free Software Foundation, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; see the file COPYING.LIB.  If not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * @(#)crypt.c	2.25 12/20/96
+ *
+ * Semiportable C version
+ *
+ */
+
+#include "ufc-crypt.h"
+#include "crypt.h"
+#include "crypt-private.h"
+
+#ifdef _UFC_32_
+
+/*
+ * 32 bit version
+ */
+
+#define SBA(sb, v) (*(long32*)((char*)(sb)+(v)))
+
+void
+_ufc_doit_r(itr, __data, res)
+     ufc_long itr, *res;
+     struct crypt_data * __restrict __data;
+{
+  int i;
+  long32 s, *k;
+  long32 *sb01 = (long32*)__data->sb0;
+  long32 *sb23 = (long32*)__data->sb2;
+  long32 l1, l2, r1, r2;
+
+  l1 = (long32)res[0]; l2 = (long32)res[1];
+  r1 = (long32)res[2]; r2 = (long32)res[3];
+
+  while(itr--) {
+    k = (long32*)__data->keysched;
+    for(i=8; i--; ) {
+      s = *k++ ^ r1;
+      l1 ^= SBA(sb01, s & 0xffff); l2 ^= SBA(sb01, (s & 0xffff)+4);
+      l1 ^= SBA(sb01, s >>= 16  ); l2 ^= SBA(sb01, (s         )+4);
+      s = *k++ ^ r2;
+      l1 ^= SBA(sb23, s & 0xffff); l2 ^= SBA(sb23, (s & 0xffff)+4);
+      l1 ^= SBA(sb23, s >>= 16  ); l2 ^= SBA(sb23, (s         )+4);
+
+      s = *k++ ^ l1;
+      r1 ^= SBA(sb01, s & 0xffff); r2 ^= SBA(sb01, (s & 0xffff)+4);
+      r1 ^= SBA(sb01, s >>= 16  ); r2 ^= SBA(sb01, (s         )+4);
+      s = *k++ ^ l2;
+      r1 ^= SBA(sb23, s & 0xffff); r2 ^= SBA(sb23, (s & 0xffff)+4);
+      r1 ^= SBA(sb23, s >>= 16  ); r2 ^= SBA(sb23, (s         )+4);
+    }
+    s=l1; l1=r1; r1=s; s=l2; l2=r2; r2=s;
+  }
+  res[0] = l1; res[1] = l2; res[2] = r1; res[3] = r2;
+}
+
+#endif
+
+#ifdef _UFC_64_
+
+/*
+ * 64 bit version
+ */
+
+#define SBA(sb, v) (*(long64*)((char*)(sb)+(v)))
+
+void
+_ufc_doit_r(itr, __data, res)
+     ufc_long itr, *res;
+     struct crypt_data * __restrict __data;
+{
+  int i;
+  long64 l, r, s, *k;
+  register long64 *sb01 = (long64*)__data->sb0;
+  register long64 *sb23 = (long64*)__data->sb2;
+
+  l = (((long64)res[0]) << 32) | ((long64)res[1]);
+  r = (((long64)res[2]) << 32) | ((long64)res[3]);
+
+  while(itr--) {
+    k = (long64*)__data->keysched;
+    for(i=8; i--; ) {
+      s = *k++ ^ r;
+      l ^= SBA(sb23, (s       ) & 0xffff);
+      l ^= SBA(sb23, (s >>= 16) & 0xffff);
+      l ^= SBA(sb01, (s >>= 16) & 0xffff);
+      l ^= SBA(sb01, (s >>= 16)         );
+
+      s = *k++ ^ l;
+      r ^= SBA(sb23, (s       ) & 0xffff);
+      r ^= SBA(sb23, (s >>= 16) & 0xffff);
+      r ^= SBA(sb01, (s >>= 16) & 0xffff);
+      r ^= SBA(sb01, (s >>= 16)         );
+    }
+    s=l; l=r; r=s;
+  }
+
+  res[0] = l >> 32; res[1] = l & 0xffffffff;
+  res[2] = r >> 32; res[3] = r & 0xffffffff;
+}
+
+#endif
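Both the 32 bit and 64 bit loops above are table-driven forms of the textbook DES Feistel iteration: in each round the right half is expanded, XORed with the round key, pushed through the sboxes and the P permutation, and XORed into the left half, after which the halves exchange roles. A plain, unoptimized skeleton of that structure for comparison; the names and the function-pointer parameter are illustrative, not part of the UFC code:

/* Reference Feistel skeleton of what _ufc_doit_r computes via the sb tables.
   'f' stands for the combined E expansion, sbox lookup and P permutation.  */
typedef unsigned long word32;
typedef word32 (*des_f_fn) (word32 half, word32 roundkey);

static void
des_rounds (word32 *l, word32 *r, const word32 key[16], des_f_fn f)
{
  int round;

  for (round = 0; round < 16; round++)
    {
      word32 t = *l ^ f (*r, key[round]);   /* old left ^ f(right, key)  */
      *l = *r;                              /* halves exchange roles ... */
      *r = t;                               /* ... new right half        */
    }
  /* Proper DES undoes the swap after the 16th round; the UFC loops get the
     same effect by alternating which half they update and exchanging l/r
     once per 16 round pass.  */
}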
diff --git a/crypt/sysdeps/unix/crypt.h b/crypt/sysdeps/unix/crypt.h
new file mode 100644
index 0000000000..13191e6f33
--- /dev/null
+++ b/crypt/sysdeps/unix/crypt.h
@@ -0,0 +1,71 @@
+/*
+ * UFC-crypt: ultra fast crypt(3) implementation
+ *
+ * Copyright (C) 1991, 92, 93, 96, 97, 98 Free Software Foundation, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with the GNU C Library; see the file COPYING.LIB.  If not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * @(#)crypt.h	1.5 12/20/96
+ *
+ */
+
+#ifndef _CRYPT_H
+#define _CRYPT_H	1
+
+#include <features.h>
+
+__BEGIN_DECLS
+
+/* Encrypt at most 8 characters from KEY using salt to perturb DES.  */
+extern char *crypt __P ((__const char *__key, __const char *__salt));
+
+/* Set up DES tables according to KEY.  */
+extern void setkey __P ((__const char *__key));
+
+/* Encrypt data in BLOCK in place if EDFLAG is zero; otherwise decrypt
+   block in place.  */
+extern void encrypt __P ((char *__block, int __edflag));
+
+#ifdef __USE_GNU
+/* Reentrant versions of the functions above.  The additional argument
+   points to a structure where the results are placed.  */
+struct crypt_data
+  {
+    char keysched[16 * 8];
+    char sb0[32768];
+    char sb1[32768];
+    char sb2[32768];
+    char sb3[32768];
+    /* end-of-alignment-critical-data */
+    char crypt_3_buf[14];
+    char current_salt[2];
+    long int current_saltbits;
+    int  direction, initialized;
+  };
+
+extern char *crypt_r __P ((__const char *__key, __const char *__salt,
+			   struct crypt_data * __restrict __data));
+
+extern void setkey_r __P ((__const char *__key,
+			   struct crypt_data * __restrict __data));
+
+extern void encrypt_r __P ((char *__block, int __edflag,
+			    struct crypt_data * __restrict __data));
+#endif
+
+__END_DECLS
+
+#endif	/* crypt.h */
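The __USE_GNU block above is what makes the reentrant interface usable from several threads at once: every caller supplies its own struct crypt_data, so no mutable state is shared. A minimal usage sketch; zeroing the structure (so initialized is 0) before the first call is what triggers the internal table setup:

/* Hypothetical reentrant caller; _GNU_SOURCE exposes struct crypt_data.  */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <crypt.h>

int
main (void)
{
  struct crypt_data data;
  const char *hash;

  memset (&data, 0, sizeof data);       /* initialized == 0: the first call
                                           builds the DES tables            */
  hash = crypt_r ("password", "ab", &data);
  printf ("%s\n", hash);                /* points into data.crypt_3_buf     */

  return 0;
}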
diff --git a/crypt/sysdeps/unix/crypt_util.c b/crypt/sysdeps/unix/crypt_util.c
new file mode 100644
index 0000000000..671571c51d
--- /dev/null
+++ b/crypt/sysdeps/unix/crypt_util.c
@@ -0,0 +1,914 @@
+/*
+ * UFC-crypt: ultra fast crypt(3) implementation
+ *
+ * Copyright (C) 1991, 92, 93, 96, 97, 98 Free Software Foundation, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; see the file COPYING.LIB.  If not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * @(#)crypt_util.c	2.56 12/20/96
+ *
+ * Support routines
+ *
+ */
+
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+#include <string.h>
+
+#ifndef STATIC
+#define STATIC static
+#endif
+
+#ifndef DOS
+#include "patchlevel.h"
+#include "ufc-crypt.h"
+#else
+/*
+ * Thanks to greg%wind@plains.NoDak.edu (Greg W. Wettstein)
+ * for DOS patches
+ */
+#include "pl.h"
+#include "ufc.h"
+#endif
+#include "crypt.h"
+#include "crypt-private.h"
+
+/* Prototypes for local functions.  */
+#if __STDC__ - 0
+#ifndef __GNU_LIBRARY__
+void _ufc_clearmem (char *start, int cnt);
+void _ufc_copymem (char *from, char *to, int cnt);
+#endif
+#ifdef _UFC_32_
+STATIC void shuffle_sb (long32 *k, ufc_long saltbits);
+#else
+STATIC void shuffle_sb (long64 *k, ufc_long saltbits);
+#endif
+#endif
+
+
+static const char patchlevel_str[] = PATCHLEVEL;
+
+/*
+ * Permutation done once on the 56 bit
+ *  key derived from the original 8 byte ASCII key.
+ */
+static const int pc1[56] = {
+  57, 49, 41, 33, 25, 17,  9,  1, 58, 50, 42, 34, 26, 18,
+  10,  2, 59, 51, 43, 35, 27, 19, 11,  3, 60, 52, 44, 36,
+  63, 55, 47, 39, 31, 23, 15,  7, 62, 54, 46, 38, 30, 22,
+  14,  6, 61, 53, 45, 37, 29, 21, 13,  5, 28, 20, 12,  4
+};
+
+/*
+ * How much to rotate each 28 bit half of the pc1 permuted
+ *  56 bit key before using pc2 to give the i'th round key
+ */
+static const int rots[16] = {
+  1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
+};
+
+/*
+ * Permutation giving the key
+ * of the i'th DES round
+ */
+static const int pc2[48] = {
+  14, 17, 11, 24,  1,  5,  3, 28, 15,  6, 21, 10,
+  23, 19, 12,  4, 26,  8, 16,  7, 27, 20, 13,  2,
+  41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48,
+  44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32
+};
+
+/*
+ * The E expansion table which selects
+ * bits from the 32 bit intermediate result.
+ */
+static const int esel[48] = {
+  32,  1,  2,  3,  4,  5,  4,  5,  6,  7,  8,  9,
+   8,  9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17,
+  16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25,
+  24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32,  1
+};
+
+/*
+ * Permutation done on the
+ * result of sbox lookups
+ */
+static const int perm32[32] = {
+  16,  7, 20, 21, 29, 12, 28, 17,  1, 15, 23, 26,  5, 18, 31, 10,
+  2,   8, 24, 14, 32, 27,  3,  9, 19, 13, 30,  6, 22, 11,  4, 25
+};
+
+/*
+ * The sboxes
+ */
+static const int sbox[8][4][16]= {
+        { { 14,  4, 13,  1,  2, 15, 11,  8,  3, 10,  6, 12,  5,  9,  0,  7 },
+          {  0, 15,  7,  4, 14,  2, 13,  1, 10,  6, 12, 11,  9,  5,  3,  8 },
+          {  4,  1, 14,  8, 13,  6,  2, 11, 15, 12,  9,  7,  3, 10,  5,  0 },
+          { 15, 12,  8,  2,  4,  9,  1,  7,  5, 11,  3, 14, 10,  0,  6, 13 }
+        },
+
+        { { 15,  1,  8, 14,  6, 11,  3,  4,  9,  7,  2, 13, 12,  0,  5, 10 },
+          {  3, 13,  4,  7, 15,  2,  8, 14, 12,  0,  1, 10,  6,  9, 11,  5 },
+          {  0, 14,  7, 11, 10,  4, 13,  1,  5,  8, 12,  6,  9,  3,  2, 15 },
+          { 13,  8, 10,  1,  3, 15,  4,  2, 11,  6,  7, 12,  0,  5, 14,  9 }
+        },
+
+        { { 10,  0,  9, 14,  6,  3, 15,  5,  1, 13, 12,  7, 11,  4,  2,  8 },
+          { 13,  7,  0,  9,  3,  4,  6, 10,  2,  8,  5, 14, 12, 11, 15,  1 },
+          { 13,  6,  4,  9,  8, 15,  3,  0, 11,  1,  2, 12,  5, 10, 14,  7 },
+          {  1, 10, 13,  0,  6,  9,  8,  7,  4, 15, 14,  3, 11,  5,  2, 12 }
+        },
+
+        { {  7, 13, 14,  3,  0,  6,  9, 10,  1,  2,  8,  5, 11, 12,  4, 15 },
+          { 13,  8, 11,  5,  6, 15,  0,  3,  4,  7,  2, 12,  1, 10, 14,  9 },
+          { 10,  6,  9,  0, 12, 11,  7, 13, 15,  1,  3, 14,  5,  2,  8,  4 },
+          {  3, 15,  0,  6, 10,  1, 13,  8,  9,  4,  5, 11, 12,  7,  2, 14 }
+        },
+
+        { {  2, 12,  4,  1,  7, 10, 11,  6,  8,  5,  3, 15, 13,  0, 14,  9 },
+          { 14, 11,  2, 12,  4,  7, 13,  1,  5,  0, 15, 10,  3,  9,  8,  6 },
+          {  4,  2,  1, 11, 10, 13,  7,  8, 15,  9, 12,  5,  6,  3,  0, 14 },
+          { 11,  8, 12,  7,  1, 14,  2, 13,  6, 15,  0,  9, 10,  4,  5,  3 }
+        },
+
+        { { 12,  1, 10, 15,  9,  2,  6,  8,  0, 13,  3,  4, 14,  7,  5, 11 },
+          { 10, 15,  4,  2,  7, 12,  9,  5,  6,  1, 13, 14,  0, 11,  3,  8 },
+          {  9, 14, 15,  5,  2,  8, 12,  3,  7,  0,  4, 10,  1, 13, 11,  6 },
+          {  4,  3,  2, 12,  9,  5, 15, 10, 11, 14,  1,  7,  6,  0,  8, 13 }
+        },
+
+        { {  4, 11,  2, 14, 15,  0,  8, 13,  3, 12,  9,  7,  5, 10,  6,  1 },
+          { 13,  0, 11,  7,  4,  9,  1, 10, 14,  3,  5, 12,  2, 15,  8,  6 },
+          {  1,  4, 11, 13, 12,  3,  7, 14, 10, 15,  6,  8,  0,  5,  9,  2 },
+          {  6, 11, 13,  8,  1,  4, 10,  7,  9,  5,  0, 15, 14,  2,  3, 12 }
+        },
+
+        { { 13,  2,  8,  4,  6, 15, 11,  1, 10,  9,  3, 14,  5,  0, 12,  7 },
+          {  1, 15, 13,  8, 10,  3,  7,  4, 12,  5,  6, 11,  0, 14,  9,  2 },
+          {  7, 11,  4,  1,  9, 12, 14,  2,  0,  6, 10, 13, 15,  3,  5,  8 },
+          {  2,  1, 14,  7,  4, 10,  8, 13, 15, 12,  9,  0,  3,  5,  6, 11 }
+        }
+};
+
+/*
+ * This is the initial
+ * permutation matrix
+ */
+static const int initial_perm[64] = {
+  58, 50, 42, 34, 26, 18, 10,  2, 60, 52, 44, 36, 28, 20, 12, 4,
+  62, 54, 46, 38, 30, 22, 14,  6, 64, 56, 48, 40, 32, 24, 16, 8,
+  57, 49, 41, 33, 25, 17,  9,  1, 59, 51, 43, 35, 27, 19, 11, 3,
+  61, 53, 45, 37, 29, 21, 13,  5, 63, 55, 47, 39, 31, 23, 15, 7
+};
+
+/*
+ * This is the final
+ * permutation matrix
+ */
+static const int final_perm[64] = {
+  40,  8, 48, 16, 56, 24, 64, 32, 39,  7, 47, 15, 55, 23, 63, 31,
+  38,  6, 46, 14, 54, 22, 62, 30, 37,  5, 45, 13, 53, 21, 61, 29,
+  36,  4, 44, 12, 52, 20, 60, 28, 35,  3, 43, 11, 51, 19, 59, 27,
+  34,  2, 42, 10, 50, 18, 58, 26, 33,  1, 41,  9, 49, 17, 57, 25
+};
+
+#define ascii_to_bin(c) ((c)>='a'?(c-59):(c)>='A'?((c)-53):(c)-'.')
+#define bin_to_ascii(c) ((c)>=38?((c)-38+'a'):(c)>=12?((c)-12+'A'):(c)+'.')
+
+static const ufc_long BITMASK[24] = {
+  0x40000000, 0x20000000, 0x10000000, 0x08000000, 0x04000000, 0x02000000,
+  0x01000000, 0x00800000, 0x00400000, 0x00200000, 0x00100000, 0x00080000,
+  0x00004000, 0x00002000, 0x00001000, 0x00000800, 0x00000400, 0x00000200,
+  0x00000100, 0x00000080, 0x00000040, 0x00000020, 0x00000010, 0x00000008
+};
+
+static const unsigned char bytemask[8]  = {
+  0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01
+};
+
+static const ufc_long longmask[32] = {
+  0x80000000, 0x40000000, 0x20000000, 0x10000000,
+  0x08000000, 0x04000000, 0x02000000, 0x01000000,
+  0x00800000, 0x00400000, 0x00200000, 0x00100000,
+  0x00080000, 0x00040000, 0x00020000, 0x00010000,
+  0x00008000, 0x00004000, 0x00002000, 0x00001000,
+  0x00000800, 0x00000400, 0x00000200, 0x00000100,
+  0x00000080, 0x00000040, 0x00000020, 0x00000010,
+  0x00000008, 0x00000004, 0x00000002, 0x00000001
+};
+
+/*
+ * do_pc1: perform pc1 permutation in the key schedule generation.
+ *
+ * The first  index is the byte number in the 8 byte ASCII key,
+ * the second index selects one of the two 28 bit halves of the result,
+ * the third  index selects the 7 bits actually used of each byte.
+ *
+ * The result is kept with 28 bits per 32 bit word, with the 4 most
+ * significant bits zero.
+ */
+static ufc_long do_pc1[8][2][128];
+
+/*
+ * do_pc2: perform pc2 permutation in the key schedule generation.
+ *
+ * The first  index is the septet number in the two 28 bit intermediate
+ * values, the second index is the septet value.
+ *
+ * Knowledge of the structure of the pc2 permutation is used.
+ *
+ * The result is kept with 28 bits per 32 bit word, with the 4 most
+ * significant bits zero.
+ */
+static ufc_long do_pc2[8][128];
+
+/*
+ * eperm32tab: do 32 bit permutation and E selection
+ *
+ * The first  index is the byte number in the 32 bit value to be permuted,
+ * the second index is the value of that byte,
+ * the third  index selects one of the two 32 bit result values.
+ *
+ * The table is used and generated internally in init_des to speed it up
+ */
+static ufc_long eperm32tab[4][256][2];
+
+/*
+ * efp: undo an extra e selection and do final
+ *      permutation giving the DES result.
+ *
+ *      Invoked 6 bits at a time on two 48 bit values
+ *      giving two 32 bit longs.
+ */
+static ufc_long efp[16][64][2];
+
+/*
+ * For use by the old, non-reentrant routines
+ * (crypt/encrypt/setkey)
+ */
+struct crypt_data _ufc_foobar;
+
+#ifdef __GNU_LIBRARY__
+#include <bits/libc-lock.h>
+
+__libc_lock_define_initialized (static, _ufc_tables_lock)
+#endif
+
+#ifdef DEBUG
+
+void
+_ufc_prbits(a, n)
+     ufc_long *a;
+     int n;
+{
+  ufc_long i, j, t, tmp;
+  n /= 8;
+  for(i = 0; i < n; i++) {
+    tmp=0;
+    for(j = 0; j < 8; j++) {
+      t=8*i+j;
+      tmp|=(a[t/24] & BITMASK[t % 24])?bytemask[j]:0;
+    }
+    (void)printf("%02x ",tmp);
+  }
+  printf(" ");
+}
+
+static void
+_ufc_set_bits(v, b)
+     ufc_long v;
+     ufc_long *b;
+{
+  ufc_long i;
+  *b = 0;
+  for(i = 0; i < 24; i++) {
+    if(v & longmask[8 + i])
+      *b |= BITMASK[i];
+  }
+}
+
+#endif
+
+#ifndef __GNU_LIBRARY__
+/*
+ * Silly rewrites of 'bzero'/'memset'. I do so
+ * because some machines don't have
+ * bzero and some don't have memset.
+ */
+
+void
+_ufc_clearmem(start, cnt)
+     char *start;
+     int cnt;
+{
+  while(cnt--)
+    *start++ = '\0';
+}
+
+void
+_ufc_copymem(from, to, cnt)
+     char *from, *to;
+     int cnt;
+{
+  while(cnt--)
+    *to++ = *from++;
+}
+#else
+#define _ufc_clearmem(start, cnt)   memset(start, 0, cnt)
+#define _ufc_copymem(from, to, cnt) memcpy(to, from, cnt)
+#endif
+
+/* lookup a 6 bit value in sbox */
+
+#define s_lookup(i,s) sbox[(i)][(((s)>>4) & 0x2)|((s) & 0x1)][((s)>>1) & 0xf];
+
+/*
+ * Initialize unit - may be invoked directly
+ * by fcrypt users.
+ */
+
+void
+__init_des_r(__data)
+     struct crypt_data * __restrict __data;
+{
+  int comes_from_bit;
+  int bit, sg;
+  ufc_long j;
+  ufc_long mask1, mask2;
+  int e_inverse[64];
+  static volatile int small_tables_initialized = 0;
+
+#ifdef _UFC_32_
+  long32 *sb[4];
+  sb[0] = (long32*)__data->sb0; sb[1] = (long32*)__data->sb1;
+  sb[2] = (long32*)__data->sb2; sb[3] = (long32*)__data->sb3;
+#endif
+#ifdef _UFC_64_
+  long64 *sb[4];
+  sb[0] = (long64*)__data->sb0; sb[1] = (long64*)__data->sb1;
+  sb[2] = (long64*)__data->sb2; sb[3] = (long64*)__data->sb3;
+#endif
+
+  if(small_tables_initialized == 0) {
+#ifdef __GNU_LIBRARY__
+    __libc_lock_lock (_ufc_tables_lock);
+    if(small_tables_initialized)
+      goto small_tables_done;
+#endif
+
+    /*
+     * Create the do_pc1 table used
+     * to effect the pc1 permutation
+     * when generating keys
+     */
+    _ufc_clearmem((char*)do_pc1, (int)sizeof(do_pc1));
+    for(bit = 0; bit < 56; bit++) {
+      comes_from_bit  = pc1[bit] - 1;
+      mask1 = bytemask[comes_from_bit % 8 + 1];
+      mask2 = longmask[bit % 28 + 4];
+      for(j = 0; j < 128; j++) {
+	if(j & mask1)
+	  do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2;
+      }
+    }
+
+    /*
+     * Create the do_pc2 table used
+     * to effect the pc2 permutation when
+     * generating keys
+     */
+    _ufc_clearmem((char*)do_pc2, (int)sizeof(do_pc2));
+    for(bit = 0; bit < 48; bit++) {
+      comes_from_bit  = pc2[bit] - 1;
+      mask1 = bytemask[comes_from_bit % 7 + 1];
+      mask2 = BITMASK[bit % 24];
+      for(j = 0; j < 128; j++) {
+	if(j & mask1)
+	  do_pc2[comes_from_bit / 7][j] |= mask2;
+      }
+    }
+
+    /*
+     * Now generate the table used to do combined
+     * 32 bit permutation and e expansion
+     *
+     * We use it because we have to permute 16384 32 bit
+     * longs into 48 bit in order to initialize sb.
+     *
+     * Looping 48 rounds per permutation becomes
+     * just too slow...
+     *
+     */
+
+    _ufc_clearmem((char*)eperm32tab, (int)sizeof(eperm32tab));
+    for(bit = 0; bit < 48; bit++) {
+      ufc_long mask1,comes_from;
+      comes_from = perm32[esel[bit]-1]-1;
+      mask1      = bytemask[comes_from % 8];
+      for(j = 256; j--;) {
+	if(j & mask1)
+	  eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK[bit % 24];
+      }
+    }
+
+    /*
+     * Create an inverse matrix for esel telling
+     * where to pick bits from when undoing it
+     */
+    for(bit=48; bit--;) {
+      e_inverse[esel[bit] - 1     ] = bit;
+      e_inverse[esel[bit] - 1 + 32] = bit + 48;
+    }
+
+    /*
+     * create efp: the matrix used to
+     * undo the E expansion and effect final permutation
+     */
+    _ufc_clearmem((char*)efp, (int)sizeof efp);
+    for(bit = 0; bit < 64; bit++) {
+      int o_bit, o_long;
+      ufc_long word_value, mask1, mask2;
+      int comes_from_f_bit, comes_from_e_bit;
+      int comes_from_word, bit_within_word;
+
+      /* See where bit i belongs in the two 32 bit longs */
+      o_long = bit / 32; /* 0..1  */
+      o_bit  = bit % 32; /* 0..31 */
+
+      /*
+       * And find a bit in the e permuted value setting this bit.
+       *
+       * Note: the e selection may have selected the same bit several
+       * times. By the initialization of e_inverse, we only look
+       * for one specific instance.
+       */
+      comes_from_f_bit = final_perm[bit] - 1;         /* 0..63 */
+      comes_from_e_bit = e_inverse[comes_from_f_bit]; /* 0..95 */
+      comes_from_word  = comes_from_e_bit / 6;        /* 0..15 */
+      bit_within_word  = comes_from_e_bit % 6;        /* 0..5  */
+
+      mask1 = longmask[bit_within_word + 26];
+      mask2 = longmask[o_bit];
+
+      for(word_value = 64; word_value--;) {
+	if(word_value & mask1)
+	  efp[comes_from_word][word_value][o_long] |= mask2;
+      }
+    }
+    small_tables_initialized = 1;
+#ifdef __GNU_LIBRARY__
+small_tables_done:
+    __libc_lock_unlock(_ufc_tables_lock);
+#endif
+  }
+
+  /*
+   * Create the sb tables:
+   *
+   * For each 12 bit segment of a 48 bit intermediate
+   * result, the sb table precomputes the two 4 bit
+   * values of the sbox lookups done with the two 6
+   * bit halves, shifts them to their proper place,
+   * sends them through perm32 and finally E expands
+   * them so that they are ready for the next
+   * DES round.
+   *
+   */
+
+  _ufc_clearmem((char*)__data->sb0, (int)sizeof(__data->sb0));
+  _ufc_clearmem((char*)__data->sb1, (int)sizeof(__data->sb1));
+  _ufc_clearmem((char*)__data->sb2, (int)sizeof(__data->sb2));
+  _ufc_clearmem((char*)__data->sb3, (int)sizeof(__data->sb3));
+
+  for(sg = 0; sg < 4; sg++) {
+    int j1, j2;
+    int s1, s2;
+
+    for(j1 = 0; j1 < 64; j1++) {
+      s1 = s_lookup(2 * sg, j1);
+      for(j2 = 0; j2 < 64; j2++) {
+	ufc_long to_permute, inx;
+
+	s2         = s_lookup(2 * sg + 1, j2);
+	to_permute = (((ufc_long)s1 << 4)  |
+		      (ufc_long)s2) << (24 - 8 * (ufc_long)sg);
+
+#ifdef _UFC_32_
+	inx = ((j1 << 6)  | j2) << 1;
+	sb[sg][inx  ]  = eperm32tab[0][(to_permute >> 24) & 0xff][0];
+	sb[sg][inx+1]  = eperm32tab[0][(to_permute >> 24) & 0xff][1];
+	sb[sg][inx  ] |= eperm32tab[1][(to_permute >> 16) & 0xff][0];
+	sb[sg][inx+1] |= eperm32tab[1][(to_permute >> 16) & 0xff][1];
+	sb[sg][inx  ] |= eperm32tab[2][(to_permute >>  8) & 0xff][0];
+	sb[sg][inx+1] |= eperm32tab[2][(to_permute >>  8) & 0xff][1];
+	sb[sg][inx  ] |= eperm32tab[3][(to_permute)       & 0xff][0];
+	sb[sg][inx+1] |= eperm32tab[3][(to_permute)       & 0xff][1];
+#endif
+#ifdef _UFC_64_
+	inx = ((j1 << 6)  | j2);
+	sb[sg][inx]  =
+	  ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) |
+	   (long64)eperm32tab[0][(to_permute >> 24) & 0xff][1];
+	sb[sg][inx] |=
+	  ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
+	   (long64)eperm32tab[1][(to_permute >> 16) & 0xff][1];
+	sb[sg][inx] |=
+	  ((long64)eperm32tab[2][(to_permute >>  8) & 0xff][0] << 32) |
+	   (long64)eperm32tab[2][(to_permute >>  8) & 0xff][1];
+	sb[sg][inx] |=
+	  ((long64)eperm32tab[3][(to_permute)       & 0xff][0] << 32) |
+	   (long64)eperm32tab[3][(to_permute)       & 0xff][1];
+#endif
+      }
+    }
+  }
+
+  __data->initialized++;
+}
+
+void
+__init_des()
+{
+  __init_des_r(&_ufc_foobar);
+}
+
+/*
+ * Process the elements of the sb table permuting the
+ * bits swapped in the expansion by the current salt.
+ */
+
+#ifdef _UFC_32_
+STATIC void
+shuffle_sb(k, saltbits)
+     long32 *k;
+     ufc_long saltbits;
+{
+  ufc_long j;
+  long32 x;
+  for(j=4096; j--;) {
+    x = (k[0] ^ k[1]) & (long32)saltbits;
+    *k++ ^= x;
+    *k++ ^= x;
+  }
+}
+#endif
+
+#ifdef _UFC_64_
+STATIC void
+shuffle_sb(k, saltbits)
+     long64 *k;
+     ufc_long saltbits;
+{
+  ufc_long j;
+  long64 x;
+  for(j=4096; j--;) {
+    x = ((*k >> 32) ^ *k) & (long64)saltbits;
+    *k++ ^= (x << 32) | x;
+  }
+}
+#endif
+
+/*
+ * Set up the unit for a new salt.
+ * Hopefully we'll not see a new salt in each crypt call.
+ */
+
+void
+_ufc_setup_salt_r(s, __data)
+     __const char *s;
+     struct crypt_data * __restrict __data;
+{
+  ufc_long i, j, saltbits;
+
+  if(__data->initialized == 0)
+    __init_des_r(__data);
+
+  if(s[0] == __data->current_salt[0] && s[1] == __data->current_salt[1])
+    return;
+  __data->current_salt[0] = s[0]; __data->current_salt[1] = s[1];
+
+  /*
+   * This is the only crypt change to DES:
+   * entries are swapped in the expansion table
+   * according to the bits set in the salt.
+   */
+  saltbits = 0;
+  for(i = 0; i < 2; i++) {
+    long c=ascii_to_bin(s[i]);
+    for(j = 0; j < 6; j++) {
+      if((c >> j) & 0x1)
+	saltbits |= BITMASK[6 * i + j];
+    }
+  }
+
+  /*
+   * Permute the sb table values
+   * to reflect the changed e
+   * selection table
+   */
+#ifdef _UFC_32_
+#define LONGG long32*
+#endif
+#ifdef _UFC_64_
+#define LONGG long64*
+#endif
+
+  shuffle_sb((LONGG)__data->sb0, __data->current_saltbits ^ saltbits);
+  shuffle_sb((LONGG)__data->sb1, __data->current_saltbits ^ saltbits);
+  shuffle_sb((LONGG)__data->sb2, __data->current_saltbits ^ saltbits);
+  shuffle_sb((LONGG)__data->sb3, __data->current_saltbits ^ saltbits);
+
+  __data->current_saltbits = saltbits;
+}
+
+void
+_ufc_mk_keytab_r(key, __data)
+     const char *key;
+     struct crypt_data * __restrict __data;
+{
+  ufc_long v1, v2, *k1;
+  int i;
+#ifdef _UFC_32_
+  long32 v, *k2;
+  k2 = (long32*)__data->keysched;
+#endif
+#ifdef _UFC_64_
+  long64 v, *k2;
+  k2 = (long64*)__data->keysched;
+#endif
+
+  v1 = v2 = 0; k1 = &do_pc1[0][0][0];
+  for(i = 8; i--;) {
+    v1 |= k1[*key   & 0x7f]; k1 += 128;
+    v2 |= k1[*key++ & 0x7f]; k1 += 128;
+  }
+
+  for(i = 0; i < 16; i++) {
+    k1 = &do_pc2[0][0];
+
+    v1 = (v1 << rots[i]) | (v1 >> (28 - rots[i]));
+    v  = k1[(v1 >> 21) & 0x7f]; k1 += 128;
+    v |= k1[(v1 >> 14) & 0x7f]; k1 += 128;
+    v |= k1[(v1 >>  7) & 0x7f]; k1 += 128;
+    v |= k1[(v1      ) & 0x7f]; k1 += 128;
+
+#ifdef _UFC_32_
+    *k2++ = (v | 0x00008000);
+    v = 0;
+#endif
+#ifdef _UFC_64_
+    v = (v << 32);
+#endif
+
+    v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i]));
+    v |= k1[(v2 >> 21) & 0x7f]; k1 += 128;
+    v |= k1[(v2 >> 14) & 0x7f]; k1 += 128;
+    v |= k1[(v2 >>  7) & 0x7f]; k1 += 128;
+    v |= k1[(v2      ) & 0x7f];
+
+#ifdef _UFC_32_
+    *k2++ = (v | 0x00008000);
+#endif
+#ifdef _UFC_64_
+    *k2++ = v | 0x0000800000008000l;
+#endif
+  }
+
+  __data->direction = 0;
+}
+
+/*
+ * Undo an extra E selection and do final permutations
+ */
+
+void
+_ufc_dofinalperm_r(res, __data)
+     ufc_long *res;
+     struct crypt_data * __restrict __data;
+{
+  ufc_long v1, v2, x;
+  ufc_long l1,l2,r1,r2;
+
+  l1 = res[0]; l2 = res[1];
+  r1 = res[2]; r2 = res[3];
+
+  x = (l1 ^ l2) & __data->current_saltbits; l1 ^= x; l2 ^= x;
+  x = (r1 ^ r2) & __data->current_saltbits; r1 ^= x; r2 ^= x;
+
+  v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
+
+  v1 |= efp[15][ r2         & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
+  v1 |= efp[14][(r2 >>= 6)  & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
+  v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
+  v1 |= efp[12][(r2 >>= 6)  & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
+
+  v1 |= efp[11][ r1         & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1];
+  v1 |= efp[10][(r1 >>= 6)  & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
+  v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
+  v1 |= efp[ 8][(r1 >>= 6)  & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
+
+  v1 |= efp[ 7][ l2         & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
+  v1 |= efp[ 6][(l2 >>= 6)  & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1];
+  v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
+  v1 |= efp[ 4][(l2 >>= 6)  & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
+
+  v1 |= efp[ 3][ l1         & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
+  v1 |= efp[ 2][(l1 >>= 6)  & 0x3f][0]; v2 |= efp[ 2][ l1 & 0x3f][1];
+  v1 |= efp[ 1][(l1 >>= 10) & 0x3f][0]; v2 |= efp[ 1][ l1 & 0x3f][1];
+  v1 |= efp[ 0][(l1 >>= 6)  & 0x3f][0]; v2 |= efp[ 0][ l1 & 0x3f][1];
+
+  res[0] = v1; res[1] = v2;
+}
+
+/*
+ * crypt only: convert the 64 bit result to 11 characters of 6 bit ASCII,
+ * prefixing with the salt
+ */
+
+void
+_ufc_output_conversion_r(v1, v2, salt, __data)
+     ufc_long v1, v2;
+     __const char *salt;
+     struct crypt_data * __restrict __data;
+{
+  int i, s, shf;
+
+  __data->crypt_3_buf[0] = salt[0];
+  __data->crypt_3_buf[1] = salt[1] ? salt[1] : salt[0];
+
+  for(i = 0; i < 5; i++) {
+    shf = (26 - 6 * i); /* to cope with MSC compiler bug */
+    __data->crypt_3_buf[i + 2] = bin_to_ascii((v1 >> shf) & 0x3f);
+  }
+
+  s  = (v2 & 0xf) << 2;
+  v2 = (v2 >> 2) | ((v1 & 0x3) << 30);
+
+  for(i = 5; i < 10; i++) {
+    shf = (56 - 6 * i);
+    __data->crypt_3_buf[i + 2] = bin_to_ascii((v2 >> shf) & 0x3f);
+  }
+
+  __data->crypt_3_buf[12] = bin_to_ascii(s);
+  __data->crypt_3_buf[13] = 0;
+}
+
+
+/*
+ * UNIX encrypt function. Takes a bitvector
+ * represented by one byte per bit and
+ * encrypts or decrypts it according to edflag
+ */
+
+void
+__encrypt_r(__block, __edflag, __data)
+     char *__block;
+     int __edflag;
+     struct crypt_data * __restrict __data;
+{
+  ufc_long l1, l2, r1, r2, res[4];
+  int i;
+#ifdef _UFC_32_
+  long32 *kt;
+  kt = (long32*)__data->keysched;
+#endif
+#ifdef _UFC_64_
+  long64 *kt;
+  kt = (long64*)__data->keysched;
+#endif
+
+  /*
+   * Undo any salt changes to E expansion
+   */
+  _ufc_setup_salt_r("..", __data);
+
+  /*
+   * Reverse key table if
+   * changing operation (encrypt/decrypt)
+   */
+  if((__edflag == 0) != (__data->direction == 0)) {
+    for(i = 0; i < 8; i++) {
+#ifdef _UFC_32_
+      long32 x;
+      x = kt[2 * (15-i)];
+      kt[2 * (15-i)] = kt[2 * i];
+      kt[2 * i] = x;
+
+      x = kt[2 * (15-i) + 1];
+      kt[2 * (15-i) + 1] = kt[2 * i + 1];
+      kt[2 * i + 1] = x;
+#endif
+#ifdef _UFC_64_
+      long64 x;
+      x = kt[15-i];
+      kt[15-i] = kt[i];
+      kt[i] = x;
+#endif
+      }
+    __data->direction = __edflag;
+  }
+
+  /*
+   * Do initial permutation + E expansion
+   */
+  i = 0;
+  for(l1 = 0; i < 24; i++) {
+    if(__block[initial_perm[esel[i]-1]-1])
+      l1 |= BITMASK[i];
+  }
+  for(l2 = 0; i < 48; i++) {
+    if(__block[initial_perm[esel[i]-1]-1])
+      l2 |= BITMASK[i-24];
+  }
+
+  i = 0;
+  for(r1 = 0; i < 24; i++) {
+    if(__block[initial_perm[esel[i]-1+32]-1])
+      r1 |= BITMASK[i];
+  }
+  for(r2 = 0; i < 48; i++) {
+    if(__block[initial_perm[esel[i]-1+32]-1])
+      r2 |= BITMASK[i-24];
+  }
+
+  /*
+   * Do DES inner loops + final conversion
+   */
+  res[0] = l1; res[1] = l2;
+  res[2] = r1; res[3] = r2;
+  _ufc_doit_r((ufc_long)1, __data, &res[0]);
+
+  /*
+   * Do final permutations
+   */
+  _ufc_dofinalperm_r(res, __data);
+
+  /*
+   * And convert to bit array
+   */
+  l1 = res[0]; r1 = res[1];
+  for(i = 0; i < 32; i++) {
+    *__block++ = (l1 & longmask[i]) != 0;
+  }
+  for(i = 0; i < 32; i++) {
+    *__block++ = (r1 & longmask[i]) != 0;
+  }
+}
+weak_alias (__encrypt_r, encrypt_r)
+
+void
+encrypt(__block, __edflag)
+     char *__block;
+     int __edflag;
+{
+  __encrypt_r(__block, __edflag, &_ufc_foobar);
+}
+
+
+/*
+ * UNIX setkey function. Takes a 64 bit DES
+ * key and sets up the machinery.
+ */
+
+void
+__setkey_r(__key, __data)
+     __const char *__key;
+     struct crypt_data * __restrict __data;
+{
+  int i,j;
+  unsigned char c;
+  unsigned char ktab[8];
+
+  _ufc_setup_salt_r("..", __data); /* be sure we're initialized */
+
+  for(i = 0; i < 8; i++) {
+    for(j = 0, c = 0; j < 8; j++)
+      c = c << 1 | *__key++;
+    ktab[i] = c >> 1;
+  }
+  _ufc_mk_keytab_r(ktab, __data);
+}
+weak_alias (__setkey_r, setkey_r)
+
+void
+setkey(__key)
+     __const char *__key;
+{
+  __setkey_r(__key, &_ufc_foobar);
+}
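__setkey_r and __encrypt_r above use the traditional one-byte-per-bit interface: setkey takes 64 bytes holding the key bits (parity included; the c >> 1 shift above drops it), and encrypt transforms a 64-byte block of 0/1 values in place, decrypting when edflag is nonzero. A small sketch of driving that interface; the helper names are illustrative:

/* Illustrative driver for the setkey()/encrypt() bit-vector interface.  */
#include <string.h>
#include "crypt.h"

/* Expand 8 bytes into 64 chars, one per bit, most significant bit first.  */
static void
bytes_to_bits (const unsigned char *in, char *bits)
{
  int i;
  for (i = 0; i < 64; i++)
    bits[i] = (in[i / 8] >> (7 - i % 8)) & 1;
}

static void
des_ecb_encrypt_block (const unsigned char key[8], unsigned char block[8])
{
  char keybits[64], datab[64];
  int i;

  bytes_to_bits (key, keybits);
  bytes_to_bits (block, datab);

  setkey (keybits);        /* builds the key schedule via _ufc_mk_keytab_r */
  encrypt (datab, 0);      /* edflag == 0: encrypt in place                */

  memset (block, 0, 8);    /* pack the bit vector back into bytes          */
  for (i = 0; i < 64; i++)
    block[i / 8] |= (unsigned char) (datab[i] << (7 - i % 8));
}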
diff --git a/crypt/sysdeps/unix/des_impl.c b/crypt/sysdeps/unix/des_impl.c
new file mode 100644
index 0000000000..f4fee720a1
--- /dev/null
+++ b/crypt/sysdeps/unix/des_impl.c
@@ -0,0 +1,615 @@
+/* Copyright (C) 1992 Eric Young - see COPYING for more details */
+/* Collected from libdes and modified for SECURE RPC by Martin Kuck 1994 */
+#include <string.h>
+#include "des.h"
+
+
+static const unsigned long des_SPtrans[8][64] =
+{
+  {				/* nibble 0 */
+    0x00820200, 0x00020000, 0x80800000, 0x80820200,
+    0x00800000, 0x80020200, 0x80020000, 0x80800000,
+    0x80020200, 0x00820200, 0x00820000, 0x80000200,
+    0x80800200, 0x00800000, 0x00000000, 0x80020000,
+    0x00020000, 0x80000000, 0x00800200, 0x00020200,
+    0x80820200, 0x00820000, 0x80000200, 0x00800200,
+    0x80000000, 0x00000200, 0x00020200, 0x80820000,
+    0x00000200, 0x80800200, 0x80820000, 0x00000000,
+    0x00000000, 0x80820200, 0x00800200, 0x80020000,
+    0x00820200, 0x00020000, 0x80000200, 0x00800200,
+    0x80820000, 0x00000200, 0x00020200, 0x80800000,
+    0x80020200, 0x80000000, 0x80800000, 0x00820000,
+    0x80820200, 0x00020200, 0x00820000, 0x80800200,
+    0x00800000, 0x80000200, 0x80020000, 0x00000000,
+    0x00020000, 0x00800000, 0x80800200, 0x00820200,
+    0x80000000, 0x80820000, 0x00000200, 0x80020200},
+
+  {				/* nibble 1 */
+    0x10042004, 0x00000000, 0x00042000, 0x10040000,
+    0x10000004, 0x00002004, 0x10002000, 0x00042000,
+    0x00002000, 0x10040004, 0x00000004, 0x10002000,
+    0x00040004, 0x10042000, 0x10040000, 0x00000004,
+    0x00040000, 0x10002004, 0x10040004, 0x00002000,
+    0x00042004, 0x10000000, 0x00000000, 0x00040004,
+    0x10002004, 0x00042004, 0x10042000, 0x10000004,
+    0x10000000, 0x00040000, 0x00002004, 0x10042004,
+    0x00040004, 0x10042000, 0x10002000, 0x00042004,
+    0x10042004, 0x00040004, 0x10000004, 0x00000000,
+    0x10000000, 0x00002004, 0x00040000, 0x10040004,
+    0x00002000, 0x10000000, 0x00042004, 0x10002004,
+    0x10042000, 0x00002000, 0x00000000, 0x10000004,
+    0x00000004, 0x10042004, 0x00042000, 0x10040000,
+    0x10040004, 0x00040000, 0x00002004, 0x10002000,
+    0x10002004, 0x00000004, 0x10040000, 0x00042000},
+
+  {				/* nibble 2 */
+    0x41000000, 0x01010040, 0x00000040, 0x41000040,
+    0x40010000, 0x01000000, 0x41000040, 0x00010040,
+    0x01000040, 0x00010000, 0x01010000, 0x40000000,
+    0x41010040, 0x40000040, 0x40000000, 0x41010000,
+    0x00000000, 0x40010000, 0x01010040, 0x00000040,
+    0x40000040, 0x41010040, 0x00010000, 0x41000000,
+    0x41010000, 0x01000040, 0x40010040, 0x01010000,
+    0x00010040, 0x00000000, 0x01000000, 0x40010040,
+    0x01010040, 0x00000040, 0x40000000, 0x00010000,
+    0x40000040, 0x40010000, 0x01010000, 0x41000040,
+    0x00000000, 0x01010040, 0x00010040, 0x41010000,
+    0x40010000, 0x01000000, 0x41010040, 0x40000000,
+    0x40010040, 0x41000000, 0x01000000, 0x41010040,
+    0x00010000, 0x01000040, 0x41000040, 0x00010040,
+    0x01000040, 0x00000000, 0x41010000, 0x40000040,
+    0x41000000, 0x40010040, 0x00000040, 0x01010000},
+
+  {				/* nibble 3 */
+    0x00100402, 0x04000400, 0x00000002, 0x04100402,
+    0x00000000, 0x04100000, 0x04000402, 0x00100002,
+    0x04100400, 0x04000002, 0x04000000, 0x00000402,
+    0x04000002, 0x00100402, 0x00100000, 0x04000000,
+    0x04100002, 0x00100400, 0x00000400, 0x00000002,
+    0x00100400, 0x04000402, 0x04100000, 0x00000400,
+    0x00000402, 0x00000000, 0x00100002, 0x04100400,
+    0x04000400, 0x04100002, 0x04100402, 0x00100000,
+    0x04100002, 0x00000402, 0x00100000, 0x04000002,
+    0x00100400, 0x04000400, 0x00000002, 0x04100000,
+    0x04000402, 0x00000000, 0x00000400, 0x00100002,
+    0x00000000, 0x04100002, 0x04100400, 0x00000400,
+    0x04000000, 0x04100402, 0x00100402, 0x00100000,
+    0x04100402, 0x00000002, 0x04000400, 0x00100402,
+    0x00100002, 0x00100400, 0x04100000, 0x04000402,
+    0x00000402, 0x04000000, 0x04000002, 0x04100400},
+
+  {				/* nibble 4 */
+    0x02000000, 0x00004000, 0x00000100, 0x02004108,
+    0x02004008, 0x02000100, 0x00004108, 0x02004000,
+    0x00004000, 0x00000008, 0x02000008, 0x00004100,
+    0x02000108, 0x02004008, 0x02004100, 0x00000000,
+    0x00004100, 0x02000000, 0x00004008, 0x00000108,
+    0x02000100, 0x00004108, 0x00000000, 0x02000008,
+    0x00000008, 0x02000108, 0x02004108, 0x00004008,
+    0x02004000, 0x00000100, 0x00000108, 0x02004100,
+    0x02004100, 0x02000108, 0x00004008, 0x02004000,
+    0x00004000, 0x00000008, 0x02000008, 0x02000100,
+    0x02000000, 0x00004100, 0x02004108, 0x00000000,
+    0x00004108, 0x02000000, 0x00000100, 0x00004008,
+    0x02000108, 0x00000100, 0x00000000, 0x02004108,
+    0x02004008, 0x02004100, 0x00000108, 0x00004000,
+    0x00004100, 0x02004008, 0x02000100, 0x00000108,
+    0x00000008, 0x00004108, 0x02004000, 0x02000008},
+
+  {				/* nibble 5 */
+    0x20000010, 0x00080010, 0x00000000, 0x20080800,
+    0x00080010, 0x00000800, 0x20000810, 0x00080000,
+    0x00000810, 0x20080810, 0x00080800, 0x20000000,
+    0x20000800, 0x20000010, 0x20080000, 0x00080810,
+    0x00080000, 0x20000810, 0x20080010, 0x00000000,
+    0x00000800, 0x00000010, 0x20080800, 0x20080010,
+    0x20080810, 0x20080000, 0x20000000, 0x00000810,
+    0x00000010, 0x00080800, 0x00080810, 0x20000800,
+    0x00000810, 0x20000000, 0x20000800, 0x00080810,
+    0x20080800, 0x00080010, 0x00000000, 0x20000800,
+    0x20000000, 0x00000800, 0x20080010, 0x00080000,
+    0x00080010, 0x20080810, 0x00080800, 0x00000010,
+    0x20080810, 0x00080800, 0x00080000, 0x20000810,
+    0x20000010, 0x20080000, 0x00080810, 0x00000000,
+    0x00000800, 0x20000010, 0x20000810, 0x20080800,
+    0x20080000, 0x00000810, 0x00000010, 0x20080010},
+
+  {				/* nibble 6 */
+    0x00001000, 0x00000080, 0x00400080, 0x00400001,
+    0x00401081, 0x00001001, 0x00001080, 0x00000000,
+    0x00400000, 0x00400081, 0x00000081, 0x00401000,
+    0x00000001, 0x00401080, 0x00401000, 0x00000081,
+    0x00400081, 0x00001000, 0x00001001, 0x00401081,
+    0x00000000, 0x00400080, 0x00400001, 0x00001080,
+    0x00401001, 0x00001081, 0x00401080, 0x00000001,
+    0x00001081, 0x00401001, 0x00000080, 0x00400000,
+    0x00001081, 0x00401000, 0x00401001, 0x00000081,
+    0x00001000, 0x00000080, 0x00400000, 0x00401001,
+    0x00400081, 0x00001081, 0x00001080, 0x00000000,
+    0x00000080, 0x00400001, 0x00000001, 0x00400080,
+    0x00000000, 0x00400081, 0x00400080, 0x00001080,
+    0x00000081, 0x00001000, 0x00401081, 0x00400000,
+    0x00401080, 0x00000001, 0x00001001, 0x00401081,
+    0x00400001, 0x00401080, 0x00401000, 0x00001001},
+
+  {				/* nibble 7 */
+    0x08200020, 0x08208000, 0x00008020, 0x00000000,
+    0x08008000, 0x00200020, 0x08200000, 0x08208020,
+    0x00000020, 0x08000000, 0x00208000, 0x00008020,
+    0x00208020, 0x08008020, 0x08000020, 0x08200000,
+    0x00008000, 0x00208020, 0x00200020, 0x08008000,
+    0x08208020, 0x08000020, 0x00000000, 0x00208000,
+    0x08000000, 0x00200000, 0x08008020, 0x08200020,
+    0x00200000, 0x00008000, 0x08208000, 0x00000020,
+    0x00200000, 0x00008000, 0x08000020, 0x08208020,
+    0x00008020, 0x08000000, 0x00000000, 0x00208000,
+    0x08200020, 0x08008020, 0x08008000, 0x00200020,
+    0x08208000, 0x00000020, 0x00200020, 0x08008000,
+    0x08208020, 0x00200000, 0x08200000, 0x08000020,
+    0x00208000, 0x00008020, 0x08008020, 0x08200000,
+    0x00000020, 0x08208000, 0x00208020, 0x00000000,
+    0x08000000, 0x08200020, 0x00008000, 0x00208020}};
+
+static const unsigned long des_skb[8][64] =
+{
+  {				/* for C bits (numbered as per FIPS 46) 1 2 3 4 5 6 */
+    0x00000000, 0x00000010, 0x20000000, 0x20000010,
+    0x00010000, 0x00010010, 0x20010000, 0x20010010,
+    0x00000800, 0x00000810, 0x20000800, 0x20000810,
+    0x00010800, 0x00010810, 0x20010800, 0x20010810,
+    0x00000020, 0x00000030, 0x20000020, 0x20000030,
+    0x00010020, 0x00010030, 0x20010020, 0x20010030,
+    0x00000820, 0x00000830, 0x20000820, 0x20000830,
+    0x00010820, 0x00010830, 0x20010820, 0x20010830,
+    0x00080000, 0x00080010, 0x20080000, 0x20080010,
+    0x00090000, 0x00090010, 0x20090000, 0x20090010,
+    0x00080800, 0x00080810, 0x20080800, 0x20080810,
+    0x00090800, 0x00090810, 0x20090800, 0x20090810,
+    0x00080020, 0x00080030, 0x20080020, 0x20080030,
+    0x00090020, 0x00090030, 0x20090020, 0x20090030,
+    0x00080820, 0x00080830, 0x20080820, 0x20080830,
+    0x00090820, 0x00090830, 0x20090820, 0x20090830},
+  {				/* for C bits (numbered as per FIPS 46) 7 8 10 11 12 13 */
+    0x00000000, 0x02000000, 0x00002000, 0x02002000,
+    0x00200000, 0x02200000, 0x00202000, 0x02202000,
+    0x00000004, 0x02000004, 0x00002004, 0x02002004,
+    0x00200004, 0x02200004, 0x00202004, 0x02202004,
+    0x00000400, 0x02000400, 0x00002400, 0x02002400,
+    0x00200400, 0x02200400, 0x00202400, 0x02202400,
+    0x00000404, 0x02000404, 0x00002404, 0x02002404,
+    0x00200404, 0x02200404, 0x00202404, 0x02202404,
+    0x10000000, 0x12000000, 0x10002000, 0x12002000,
+    0x10200000, 0x12200000, 0x10202000, 0x12202000,
+    0x10000004, 0x12000004, 0x10002004, 0x12002004,
+    0x10200004, 0x12200004, 0x10202004, 0x12202004,
+    0x10000400, 0x12000400, 0x10002400, 0x12002400,
+    0x10200400, 0x12200400, 0x10202400, 0x12202400,
+    0x10000404, 0x12000404, 0x10002404, 0x12002404,
+    0x10200404, 0x12200404, 0x10202404, 0x12202404},
+  {				/* for C bits (numbered as per FIPS 46) 14 15 16 17 19 20 */
+    0x00000000, 0x00000001, 0x00040000, 0x00040001,
+    0x01000000, 0x01000001, 0x01040000, 0x01040001,
+    0x00000002, 0x00000003, 0x00040002, 0x00040003,
+    0x01000002, 0x01000003, 0x01040002, 0x01040003,
+    0x00000200, 0x00000201, 0x00040200, 0x00040201,
+    0x01000200, 0x01000201, 0x01040200, 0x01040201,
+    0x00000202, 0x00000203, 0x00040202, 0x00040203,
+    0x01000202, 0x01000203, 0x01040202, 0x01040203,
+    0x08000000, 0x08000001, 0x08040000, 0x08040001,
+    0x09000000, 0x09000001, 0x09040000, 0x09040001,
+    0x08000002, 0x08000003, 0x08040002, 0x08040003,
+    0x09000002, 0x09000003, 0x09040002, 0x09040003,
+    0x08000200, 0x08000201, 0x08040200, 0x08040201,
+    0x09000200, 0x09000201, 0x09040200, 0x09040201,
+    0x08000202, 0x08000203, 0x08040202, 0x08040203,
+    0x09000202, 0x09000203, 0x09040202, 0x09040203},
+  {				/* for C bits (numbered as per FIPS 46) 21 23 24 26 27 28 */
+    0x00000000, 0x00100000, 0x00000100, 0x00100100,
+    0x00000008, 0x00100008, 0x00000108, 0x00100108,
+    0x00001000, 0x00101000, 0x00001100, 0x00101100,
+    0x00001008, 0x00101008, 0x00001108, 0x00101108,
+    0x04000000, 0x04100000, 0x04000100, 0x04100100,
+    0x04000008, 0x04100008, 0x04000108, 0x04100108,
+    0x04001000, 0x04101000, 0x04001100, 0x04101100,
+    0x04001008, 0x04101008, 0x04001108, 0x04101108,
+    0x00020000, 0x00120000, 0x00020100, 0x00120100,
+    0x00020008, 0x00120008, 0x00020108, 0x00120108,
+    0x00021000, 0x00121000, 0x00021100, 0x00121100,
+    0x00021008, 0x00121008, 0x00021108, 0x00121108,
+    0x04020000, 0x04120000, 0x04020100, 0x04120100,
+    0x04020008, 0x04120008, 0x04020108, 0x04120108,
+    0x04021000, 0x04121000, 0x04021100, 0x04121100,
+    0x04021008, 0x04121008, 0x04021108, 0x04121108},
+  {				/* for D bits (numbered as per FIPS 46) 1 2 3 4 5 6 */
+    0x00000000, 0x10000000, 0x00010000, 0x10010000,
+    0x00000004, 0x10000004, 0x00010004, 0x10010004,
+    0x20000000, 0x30000000, 0x20010000, 0x30010000,
+    0x20000004, 0x30000004, 0x20010004, 0x30010004,
+    0x00100000, 0x10100000, 0x00110000, 0x10110000,
+    0x00100004, 0x10100004, 0x00110004, 0x10110004,
+    0x20100000, 0x30100000, 0x20110000, 0x30110000,
+    0x20100004, 0x30100004, 0x20110004, 0x30110004,
+    0x00001000, 0x10001000, 0x00011000, 0x10011000,
+    0x00001004, 0x10001004, 0x00011004, 0x10011004,
+    0x20001000, 0x30001000, 0x20011000, 0x30011000,
+    0x20001004, 0x30001004, 0x20011004, 0x30011004,
+    0x00101000, 0x10101000, 0x00111000, 0x10111000,
+    0x00101004, 0x10101004, 0x00111004, 0x10111004,
+    0x20101000, 0x30101000, 0x20111000, 0x30111000,
+    0x20101004, 0x30101004, 0x20111004, 0x30111004},
+  {				/* for D bits (numbered as per FIPS 46) 8 9 11 12 13 14 */
+    0x00000000, 0x08000000, 0x00000008, 0x08000008,
+    0x00000400, 0x08000400, 0x00000408, 0x08000408,
+    0x00020000, 0x08020000, 0x00020008, 0x08020008,
+    0x00020400, 0x08020400, 0x00020408, 0x08020408,
+    0x00000001, 0x08000001, 0x00000009, 0x08000009,
+    0x00000401, 0x08000401, 0x00000409, 0x08000409,
+    0x00020001, 0x08020001, 0x00020009, 0x08020009,
+    0x00020401, 0x08020401, 0x00020409, 0x08020409,
+    0x02000000, 0x0A000000, 0x02000008, 0x0A000008,
+    0x02000400, 0x0A000400, 0x02000408, 0x0A000408,
+    0x02020000, 0x0A020000, 0x02020008, 0x0A020008,
+    0x02020400, 0x0A020400, 0x02020408, 0x0A020408,
+    0x02000001, 0x0A000001, 0x02000009, 0x0A000009,
+    0x02000401, 0x0A000401, 0x02000409, 0x0A000409,
+    0x02020001, 0x0A020001, 0x02020009, 0x0A020009,
+    0x02020401, 0x0A020401, 0x02020409, 0x0A020409},
+  {				/* for D bits (numbered as per FIPS 46) 16 17 18 19 20 21 */
+    0x00000000, 0x00000100, 0x00080000, 0x00080100,
+    0x01000000, 0x01000100, 0x01080000, 0x01080100,
+    0x00000010, 0x00000110, 0x00080010, 0x00080110,
+    0x01000010, 0x01000110, 0x01080010, 0x01080110,
+    0x00200000, 0x00200100, 0x00280000, 0x00280100,
+    0x01200000, 0x01200100, 0x01280000, 0x01280100,
+    0x00200010, 0x00200110, 0x00280010, 0x00280110,
+    0x01200010, 0x01200110, 0x01280010, 0x01280110,
+    0x00000200, 0x00000300, 0x00080200, 0x00080300,
+    0x01000200, 0x01000300, 0x01080200, 0x01080300,
+    0x00000210, 0x00000310, 0x00080210, 0x00080310,
+    0x01000210, 0x01000310, 0x01080210, 0x01080310,
+    0x00200200, 0x00200300, 0x00280200, 0x00280300,
+    0x01200200, 0x01200300, 0x01280200, 0x01280300,
+    0x00200210, 0x00200310, 0x00280210, 0x00280310,
+    0x01200210, 0x01200310, 0x01280210, 0x01280310},
+  {				/* for D bits (numbered as per FIPS 46) 22 23 24 25 27 28 */
+    0x00000000, 0x04000000, 0x00040000, 0x04040000,
+    0x00000002, 0x04000002, 0x00040002, 0x04040002,
+    0x00002000, 0x04002000, 0x00042000, 0x04042000,
+    0x00002002, 0x04002002, 0x00042002, 0x04042002,
+    0x00000020, 0x04000020, 0x00040020, 0x04040020,
+    0x00000022, 0x04000022, 0x00040022, 0x04040022,
+    0x00002020, 0x04002020, 0x00042020, 0x04042020,
+    0x00002022, 0x04002022, 0x00042022, 0x04042022,
+    0x00000800, 0x04000800, 0x00040800, 0x04040800,
+    0x00000802, 0x04000802, 0x00040802, 0x04040802,
+    0x00002800, 0x04002800, 0x00042800, 0x04042800,
+    0x00002802, 0x04002802, 0x00042802, 0x04042802,
+    0x00000820, 0x04000820, 0x00040820, 0x04040820,
+    0x00000822, 0x04000822, 0x00040822, 0x04040822,
+    0x00002820, 0x04002820, 0x00042820, 0x04042820,
+    0x00002822, 0x04002822, 0x00042822, 0x04042822},
+};
+
+#define c2l(c,l)	(l =((unsigned long)(*((c)++)))    , \
+			 l|=((unsigned long)(*((c)++)))<< 8, \
+			 l|=((unsigned long)(*((c)++)))<<16, \
+			 l|=((unsigned long)(*((c)++)))<<24)
+
+#define l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff), \
+			 *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
+			 *((c)++)=(unsigned char)(((l)>>16)&0xff), \
+			 *((c)++)=(unsigned char)(((l)>>24)&0xff))
+
+/*
+ * IP and FP
+ * The problem is more of a geometric problem than random bit fiddling.
+ *  0  1  2  3  4  5  6  7      62 54 46 38 30 22 14  6
+ *  8  9 10 11 12 13 14 15      60 52 44 36 28 20 12  4
+ * 16 17 18 19 20 21 22 23      58 50 42 34 26 18 10  2
+ * 24 25 26 27 28 29 30 31  to  56 48 40 32 24 16  8  0
+ *
+ * 32 33 34 35 36 37 38 39      63 55 47 39 31 23 15  7
+ * 40 41 42 43 44 45 46 47      61 53 45 37 29 21 13  5
+ * 48 49 50 51 52 53 54 55      59 51 43 35 27 19 11  3
+ * 56 57 58 59 60 61 62 63      57 49 41 33 25 17  9  1
+ *
+ * The output has been subject to swaps of the form
+ * 0 1 -> 3 1 but the odd and even bits have been put into
+ * 2 3    2 0
+ * different words.  The main trick is to remember that
+ * t=((l>>size)^r)&(mask);
+ * r^=t;
+ * l^=(t<<size);
+ * can be used to swap and move bits between words.
+ *
+ * So l =  0  1  2  3  r = 16 17 18 19
+ *         4  5  6  7      20 21 22 23
+ *         8  9 10 11      24 25 26 27
+ *        12 13 14 15      28 29 30 31
+ * becomes (for size == 2 and mask == 0x3333)
+ * t =   2^16  3^17 -- --   l =  0  1 16 17  r =  2  3 18 19
+ *       6^20  7^21 -- --        4  5 20 21       6  7 22 23
+ *      10^24 11^25 -- --        8  9 24 25      10 11 24 25
+ *      14^28 15^29 -- --       12 13 28 29      14 15 28 29
+ *
+ * Thanks for hints from Richard Outerbridge - he told me IP&FP
+ * could be done in 15 xor, 10 shifts and 5 ands.
+ * When I finally started to think of the problem in 2D
+ * I first got ~42 operations without xors.  When I remembered
+ * how to use xors :-) I got it to its final state.
+ */
+
+#define PERM_OP(a,b,t,n,m) ((t)=((((a)>>(n))^(b))&(m)),\
+	(b)^=(t),\
+	(a)^=((t)<<(n)))
+
+#define HPERM_OP(a,t,n,m) ((t)=((((a)<<(16-(n)))^(a))&(m)),\
+	(a)=(a)^(t)^(t>>(16-(n))))
+
+
+/* The changes to this macro may help or hinder, depending on the
+ * compiler and the architecture.  gcc2 always seems to do well :-).
+ * Inspired by Dana How <how@isl.stanford.edu>
+ * DO NOT use the alternative version on machines with 8 byte longs.
+ */
+#ifdef ALT_ECB
+#define D_ENCRYPT(L,R,S) \
+	u=((R^s[S  ])<<2);	\
+	t= R^s[S+1]; \
+	t=((t>>2)+(t<<30)); \
+	L^= \
+	*(const unsigned long *)(des_SP+0x0100+((t    )&0xfc))+ \
+	*(const unsigned long *)(des_SP+0x0300+((t>> 8)&0xfc))+ \
+	*(const unsigned long *)(des_SP+0x0500+((t>>16)&0xfc))+ \
+	*(const unsigned long *)(des_SP+0x0700+((t>>24)&0xfc))+ \
+	*(const unsigned long *)(des_SP+       ((u    )&0xfc))+ \
+  	*(const unsigned long *)(des_SP+0x0200+((u>> 8)&0xfc))+ \
+  	*(const unsigned long *)(des_SP+0x0400+((u>>16)&0xfc))+ \
+ 	*(const unsigned long *)(des_SP+0x0600+((u>>24)&0xfc));
+#else /* original version */
+#define D_ENCRYPT(L,R,S)	\
+	u=(R^s[S  ]); \
+	t=R^s[S+1]; \
+	t=((t>>4)+(t<<28)); \
+	L^=	des_SPtrans[1][(t    )&0x3f]| \
+		des_SPtrans[3][(t>> 8)&0x3f]| \
+		des_SPtrans[5][(t>>16)&0x3f]| \
+		des_SPtrans[7][(t>>24)&0x3f]| \
+		des_SPtrans[0][(u    )&0x3f]| \
+		des_SPtrans[2][(u>> 8)&0x3f]| \
+		des_SPtrans[4][(u>>16)&0x3f]| \
+		des_SPtrans[6][(u>>24)&0x3f];
+#endif
+
+#define ITERATIONS 16
+
+static const char shifts2[16] =
+{0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0};
+
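shifts2 encodes the per-round rotation of the two 28-bit key halves in des_set_key below: a 1 means rotate by two bits, a 0 by one bit, matching the standard DES shift schedule (1,1,2,2,2,2,2,2,1,2,2,2,2,2,2,1). A quick standalone check, duplicating the table for illustration, that the rotations total 28, so both halves return to their starting alignment after all 16 rounds:

#include <assert.h>

int main (void)
{
  const char shifts2[16] =
    {0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0};
  int total = 0;

  /* Each round rotates by one bit, plus one more when shifts2[i] is set.  */
  for (int i = 0; i < 16; ++i)
    total += 1 + shifts2[i];

  /* 28 == width of each key half, so C and D come back around exactly.  */
  assert (total == 28);
  return 0;
}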
+static void des_set_key (char *, unsigned long *) internal_function;
+static void des_encrypt (unsigned long *, unsigned long *, int)
+     internal_function;
+int _des_crypt (char *, unsigned, struct desparams *);
+
+static void
+internal_function
+des_set_key (char *key, unsigned long *schedule)
+{
+  register unsigned long c, d, t, s;
+  register unsigned char *in;
+  register unsigned long *k;
+  register int i;
+
+  k = (unsigned long *) schedule;
+  in = (unsigned char *) key;
+
+  c2l (in, c);
+  c2l (in, d);
+
+  /* I now do it in 47 simple operations :-)
+   * Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov)
+   * for the inspiration. :-) */
+  PERM_OP (d, c, t, 4, 0x0f0f0f0f);
+  HPERM_OP (c, t, -2, 0xcccc0000);
+  HPERM_OP (d, t, -2, 0xcccc0000);
+  PERM_OP (d, c, t, 1, 0x55555555);
+  PERM_OP (c, d, t, 8, 0x00ff00ff);
+  PERM_OP (d, c, t, 1, 0x55555555);
+  d = (((d & 0x000000ff) << 16) | (d & 0x0000ff00) |
+       ((d & 0x00ff0000) >> 16) | ((c & 0xf0000000) >> 4));
+  c &= 0x0fffffff;
+
+  for (i = 0; i < ITERATIONS; i++)
+    {
+      if (shifts2[i])
+	{
+	  c = ((c >> 2) | (c << 26));
+	  d = ((d >> 2) | (d << 26));
+	}
+      else
+	{
+	  c = ((c >> 1) | (c << 27));
+	  d = ((d >> 1) | (d << 27));
+	}
+      c &= 0x0fffffff;
+      d &= 0x0fffffff;
+      /* could be a few fewer shifts, but I am too lazy at this
+       * point in time to investigate */
+      s = des_skb[0][(c) & 0x3f] |
+	des_skb[1][((c >> 6) & 0x03) | ((c >> 7) & 0x3c)] |
+	des_skb[2][((c >> 13) & 0x0f) | ((c >> 14) & 0x30)] |
+	des_skb[3][((c >> 20) & 0x01) | ((c >> 21) & 0x06) | ((c >> 22) & 0x38)];
+      t = des_skb[4][(d) & 0x3f] |
+	des_skb[5][((d >> 7) & 0x03) | ((d >> 8) & 0x3c)] |
+	des_skb[6][(d >> 15) & 0x3f] |
+	des_skb[7][((d >> 21) & 0x0f) | ((d >> 22) & 0x30)];
+
+      /* table contained 0213 4657 */
+      *(k++) = ((t << 16) | (s & 0x0000ffff)) & 0xffffffff;
+      s = ((s >> 16) | (t & 0xffff0000));
+
+      s = (s << 4) | (s >> 28);
+      *(k++) = s & 0xffffffff;
+    }
+}
+
+
+static void
+internal_function
+des_encrypt (unsigned long *buf, unsigned long *schedule, int encrypt)
+{
+  register unsigned long l, r, t, u;
+#ifdef ALT_ECB
+  register const unsigned char *des_SP = (const unsigned char *) des_SPtrans;
+#endif
+  register int i;
+  register unsigned long *s;
+
+  l = buf[0];
+  r = buf[1];
+
+  /* do IP */
+  PERM_OP (r, l, t, 4, 0x0f0f0f0f);
+  PERM_OP (l, r, t, 16, 0x0000ffff);
+  PERM_OP (r, l, t, 2, 0x33333333);
+  PERM_OP (l, r, t, 8, 0x00ff00ff);
+  PERM_OP (r, l, t, 1, 0x55555555);
+  /* r and l are reversed - remember that :-) - fix
+   * it in the next step */
+
+  /* Things have been modified so that the initial rotate is
+   * done outside the loop.  This required the
+   * des_SPtrans values in sp.h to be rotated 1 bit to the right.
+   * One perl script later and things have a 5% speed up on a sparc2.
+   * Thanks to Richard Outerbridge <71755.204@CompuServe.COM>
+   * for pointing this out. */
+  t = (r << 1) | (r >> 31);
+  r = (l << 1) | (l >> 31);
+  l = t;
+
+  /* clear the top bits on machines with 8-byte longs */
+  l &= 0xffffffff;
+  r &= 0xffffffff;
+
+  s = (unsigned long *) schedule;
+  /* I don't know if it is worth the effort of loop unrolling the
+   * inner loop */
+  if (encrypt)
+    {
+      for (i = 0; i < 32; i += 4)
+	{
+	  D_ENCRYPT (l, r, i + 0);	/*  1 */
+	  D_ENCRYPT (r, l, i + 2);	/*  2 */
+	}
+    }
+  else
+    {
+      for (i = 30; i > 0; i -= 4)
+	{
+	  D_ENCRYPT (l, r, i - 0);	/* 16 */
+	  D_ENCRYPT (r, l, i - 2);	/* 15 */
+	}
+    }
+  l = (l >> 1) | (l << 31);
+  r = (r >> 1) | (r << 31);
+  /* clear the top bits on machines with 8-byte longs */
+  l &= 0xffffffff;
+  r &= 0xffffffff;
+
+  /* swap l and r
+   * we will not do the swap so just remember they are
+   * reversed for the rest of the subroutine
+   * luckily FP fixes this problem :-) */
+
+  PERM_OP (r, l, t, 1, 0x55555555);
+  PERM_OP (l, r, t, 8, 0x00ff00ff);
+  PERM_OP (r, l, t, 2, 0x33333333);
+  PERM_OP (l, r, t, 16, 0x0000ffff);
+  PERM_OP (r, l, t, 4, 0x0f0f0f0f);
+
+  buf[0] = l;
+  buf[1] = r;
+
+  l = r = t = u = 0;
+}
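des_encrypt pairs the rounds so that each loop iteration updates l from r and then r from l, which removes the explicit half-swap of a textbook Feistel network; decryption is the same pairing run with the subkeys in reverse order. A toy Feistel sketch, with a stand-in round function that is deliberately not the DES f, showing why that pairing plus a reversed key schedule inverts the cipher (in des_encrypt itself the explicit swaps below are effectively absorbed by the rotate/exchange step before the loop):

#include <assert.h>
#include <stdint.h>

#define ROUNDS 16

/* Stand-in round function -- NOT the DES f; any function of (half, subkey)
   works for showing the structure.  */
static uint32_t toy_f (uint32_t half, uint32_t k)
{
  uint32_t t = half ^ k;
  return ((t << 3) | (t >> 29)) + (t ^ 0x9e3779b9u);
}

/* Two Feistel rounds per iteration; the second call swaps the roles of
   l and r, the same pairing as D_ENCRYPT(l,r,...) / D_ENCRYPT(r,l,...).  */
static void toy_encrypt (uint32_t *l, uint32_t *r, const uint32_t k[ROUNDS])
{
  for (int i = 0; i < ROUNDS; i += 2)
    {
      *l ^= toy_f (*r, k[i]);
      *r ^= toy_f (*l, k[i + 1]);
    }
}

/* Decryption exchanges the halves and walks the subkeys backwards;
   each "l ^= f(r, k)" step is its own inverse, so undoing the rounds
   in reverse order recovers the plaintext.  */
static void toy_decrypt (uint32_t *l, uint32_t *r, const uint32_t k[ROUNDS])
{
  uint32_t t = *l; *l = *r; *r = t;
  for (int i = ROUNDS - 2; i >= 0; i -= 2)
    {
      *l ^= toy_f (*r, k[i + 1]);
      *r ^= toy_f (*l, k[i]);
    }
  t = *l; *l = *r; *r = t;
}

int main (void)
{
  uint32_t k[ROUNDS], l = 0x01234567, r = 0x89abcdef;
  for (int i = 0; i < ROUNDS; ++i)
    k[i] = 0x11111111u * (uint32_t) (i + 1);
  toy_encrypt (&l, &r, k);
  toy_decrypt (&l, &r, k);
  assert (l == 0x01234567 && r == 0x89abcdef);	/* round trip */
  return 0;
}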
+
+
+int
+_des_crypt (char *buf, unsigned len, struct desparams *desp)
+{
+  unsigned long schedule[32];
+  register unsigned long tin0, tin1;
+  register unsigned long tout0, tout1, xor0, xor1;
+  register unsigned char *in, *out;
+  unsigned long tbuf[2];
+  unsigned char *iv, *oiv;
+  int cbc_mode;
+
+  cbc_mode = (desp->des_mode == CBC) ? 1 : 0;
+
+  in = (unsigned char *) buf;
+  out = (unsigned char *) buf;
+  oiv = iv = (unsigned char *) desp->des_ivec;
+
+  des_set_key (desp->des_key, schedule);
+
+  tin0 = tin1 = 0;		/* For GCC */
+  if (desp->des_dir == ENCRYPT)
+    {
+      c2l (iv, tout0);
+      c2l (iv, tout1);
+      for (; len > 0; len -= 8)
+	{
+	  c2l (in, tin0);
+	  c2l (in, tin1);
+	  if (cbc_mode)
+	    {
+	      tin0 ^= tout0;
+	      tin1 ^= tout1;
+	    }
+	  tbuf[0] = tin0;
+	  tbuf[1] = tin1;
+	  des_encrypt (tbuf, schedule, 1);
+	  tout0 = tbuf[0];
+	  tout1 = tbuf[1];
+	  l2c (tout0, out);
+	  l2c (tout1, out);
+	}
+      l2c (tout0, oiv);
+      l2c (tout1, oiv);
+    }
+  else
+    {
+      c2l (iv, xor0);
+      c2l (iv, xor1);
+      for (; len > 0; len -= 8)
+	{
+	  c2l (in, tin0);
+	  c2l (in, tin1);
+	  tbuf[0] = tin0;
+	  tbuf[1] = tin1;
+	  des_encrypt (tbuf, schedule, 0);
+	  if (cbc_mode)
+	    {
+	      tout0 = tbuf[0] ^ xor0;
+	      tout1 = tbuf[1] ^ xor1;
+	      xor0 = tin0;
+	      xor1 = tin1;
+	    }
+	  else
+	    {
+	      tout0 = tbuf[0];
+	      tout1 = tbuf[1];
+	    }
+	  l2c (tout0, out);
+	  l2c (tout1, out);
+	}
+      l2c (tin0, oiv);
+      l2c (tin1, oiv);
+    }
+  tin0 = tin1 = tout0 = tout1 = xor0 = xor1 = 0;
+  tbuf[0] = tbuf[1] = 0;
+  __bzero (schedule, sizeof (schedule));
+
+  return (1);
+}
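_des_crypt folds ECB and CBC into one loop: in CBC mode it xors the previous ciphertext block into the plaintext before encrypting, xors it back in after decrypting, and finally stores the last chaining value back into the caller's IV. A self-contained sketch of just that chaining pattern, with a trivial invertible stand-in for the block cipher (demo_block and cbc_demo are our names, not part of this file):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Stand-in 64-bit "block cipher": an invertible xor/rotate, NOT DES.  */
static void demo_block (uint32_t b[2], int encrypt)
{
  if (encrypt)
    {
      b[0] ^= 0xdeadbeefu;
      b[1] = ((b[1] << 7) | (b[1] >> 25)) ^ b[0];
    }
  else
    {
      b[1] = ((b[1] ^ b[0]) >> 7) | ((b[1] ^ b[0]) << 25);
      b[0] ^= 0xdeadbeefu;
    }
}

/* CBC over an array of two-word blocks, mirroring the chaining in
   _des_crypt: xor-then-encrypt going forward, decrypt-then-xor with the
   saved previous ciphertext going backward.  */
static void cbc_demo (uint32_t (*blk)[2], unsigned nblocks,
		      uint32_t iv[2], int encrypt)
{
  uint32_t chain0 = iv[0], chain1 = iv[1];

  for (unsigned i = 0; i < nblocks; ++i)
    {
      if (encrypt)
	{
	  blk[i][0] ^= chain0;
	  blk[i][1] ^= chain1;
	  demo_block (blk[i], 1);
	  chain0 = blk[i][0];		/* next block chains off this ciphertext */
	  chain1 = blk[i][1];
	}
      else
	{
	  uint32_t c0 = blk[i][0], c1 = blk[i][1];	/* save ciphertext */
	  demo_block (blk[i], 0);
	  blk[i][0] ^= chain0;
	  blk[i][1] ^= chain1;
	  chain0 = c0;			/* chain off the saved ciphertext */
	  chain1 = c1;
	}
    }
  iv[0] = chain0;			/* _des_crypt likewise writes the */
  iv[1] = chain1;			/* final chaining value back to the ivec */
}

int main (void)
{
  uint32_t data[3][2] = { {1, 2}, {3, 4}, {5, 6} }, copy[3][2];
  uint32_t iv_e[2] = { 0x11111111, 0x22222222 };
  uint32_t iv_d[2] = { 0x11111111, 0x22222222 };

  memcpy (copy, data, sizeof data);
  cbc_demo (data, 3, iv_e, 1);		/* encrypt in place */
  cbc_demo (data, 3, iv_d, 0);		/* decrypt in place */
  assert (memcmp (data, copy, sizeof data) == 0);
  return 0;
}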
diff --git a/crypt/sysdeps/unix/patchlevel.h b/crypt/sysdeps/unix/patchlevel.h
new file mode 100644
index 0000000000..450c091f60
--- /dev/null
+++ b/crypt/sysdeps/unix/patchlevel.h
@@ -0,0 +1,25 @@
+/*
+ * UFC-crypt: ultra fast crypt(3) implementation
+ *
+ * Copyright (C) 1991, 1992, 1993, 1996 Free Software Foundation, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; see the file COPYING.LIB.  If not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * @(#)patchlevel.h	1.13 9/10/96
+ *
+ */
+
+#define PATCHLEVEL "UFC-crypt, patchlevel 1e, @(#)patchlevel.h	1.13 9/10/96"
diff --git a/crypt/sysdeps/unix/ufc-crypt.h b/crypt/sysdeps/unix/ufc-crypt.h
new file mode 100644
index 0000000000..879211afec
--- /dev/null
+++ b/crypt/sysdeps/unix/ufc-crypt.h
@@ -0,0 +1,29 @@
+/* Types for UFC-crypt.
+   Copyright (C) 1998 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If not,
+   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+#include <stdint.h>
+
+typedef uint_fast32_t ufc_long;
+typedef uint64_t long64;
+typedef uint32_t long32;
+#if UINT_FAST32_MAX == UINT_FAST64_MAX
+# define _UFC_64_
+#else
+# define _UFC_32_
+#endif