ftp.nice.ch/pub/next/connectivity/infosystems/WAIStation.1.9.6.N.b.tar.gz#/WAIS/ir/irsearch.c

This is irsearch.c in view mode; [Download] [Up]

/* WIDE AREA INFORMATION SERVER SOFTWARE
   No guarantees or restrictions.  See the readme file for the full standard
   disclaimer.    
   Brewster@think.com
*/

/* Looks up words in the inverted file index.
 *
 * Important functions:
 * run_search
 * search_for_words
 *
 * to do:
 *    Handle searches on multiple databases
 */
 
/* Change Log:
 * $Log:	irsearch.c,v $
 * Revision 1.54  92/05/10  14:44:35  jonathan
 * Made a little safer on NULL docid's when parsing.
 * 
 * Revision 1.53  92/05/04  17:20:11  jonathan
 * Added test for parsing docids (if null, log error).
 * 
 * Revision 1.52  92/04/29  08:22:17  shen
 * declare global variable "_BE_normalized" to allow turning on/off FE score
 * normalization.
 * 
 * Revision 1.51  92/04/28  16:56:30  morris
 * added boolean to serial engine
 * 
 * Revision 1.50  92/04/01  17:10:21  jonathan
 * ?
 * 
 * Revision 1.49  92/03/23  13:26:27  shen
 * add timing for query. Compile with GET_QUERY_TIMING. print timing every 200 queries.
 * 
 * Revision 1.48  92/03/18  08:56:00  jonathan
 * Removed databaseName argument to getDocumentText and getData.
 * 
 * Revision 1.47  92/02/17  16:22:42  jonathan
 * Added WCAT to types that can be used for relevance feedback.
 * 
 * Revision 1.46  92/02/16  18:04:38  jonathan
 * Demoted more WLOG_ERROR's to WLOG_WARNING's
 * 
 * Revision 1.45  92/02/16  09:51:12  jonathan
 * Plugged some memory leaks.  I bet there are more.
 * 
 * Revision 1.44  92/02/15  19:41:20  jonathan
 * Improved logging for invalid relevant documents.
 * 
 * Revision 1.43  92/02/14  16:06:48  jonathan
 * Added diagnostic record for invalid relevant document.
 * 
 * Revision 1.42  92/02/12  17:30:20  jonathan
 * Conditionalized inclusion of object code.
 * 
 * Revision 1.41  92/02/12  17:04:03  jonathan
 * Moved logging info around.
 * 
 * Revision 1.40  92/02/12  15:26:35  morris
 * only call finished_search_word when the preceding search was successful
 * 
 * Revision 1.39  92/02/12  13:30:39  jonathan
 * Added "$Log" so RCS will put the log message in the header
 * 
 * changes 5.2.90 HWM
	- changed calls to perror() to calls to panic()
	- made print_best_hits() only print hits w/ non-zero weight
	- made random arrays static instead of reading them in.  
	  removed getRandomArray.
	- removed unused variables
  Brewster 7/90 made look_up_word_in_dictionary safer.
  Brewster 7/90 eliminated trailing <lf> on filename and headline table accesses
  HWM 7.12.90 - replaced all calls to panic with error code returns and a log
                file  
	      - added the routine initSearchEngine() which should be called 
	        before any other search routine
	      - added beFriendly() to give other processes time under 
	        multifinder
  JG 5.31.91 - added relevance feedback for line fragments.
  JG 7.8.91  - added doc_id to search_for_words, removed scale_scores.
*/

#if 0
#define GET_QUERY_TIMING
#endif

#define _search_c

#include <ctype.h>

#include <string.h> 	/* for strlen() */
#ifdef THINK_C
#include <unix.h> 		/* for sleep() */
#endif /* think_c */

#include "cutil.h"
#include "irfiles.h"
#include "irtfiles.h" /* for map_over_words */
#include "irlex.h"
#include "irext.h"
#include "irsearch.h"
#include "docid.h"
#include <math.h>
#include "irretrvl.h"
#ifdef BOOL
#include "irparse.h"
#endif
#include "trie.h"

#define TEST_SEARCH 	false	/* set to TRUE to allow printing to console */

char *server_name = NULL;
long tcp_port = 0;

long _BE_normalized = 0;

#ifdef GET_QUERY_TIMING
#include <sys/timeb.h>
static struct timeb  s_time, e_time;
static float t_time = 0;
static long n_query = 0;
#endif


/*----------------------------------------------------------------------*/

static Boolean calcDocLength _AP((hit* theHit,long* lines,long* bytes));

static Boolean
calcDocLength(theHit,lines,bytes)
hit* theHit;
long* lines;
long* bytes;
/* Given a hit, fill in *lines and *bytes with the document's length.
   Returns true on success, false if the underlying file could not be
   read.  This is not needed by the serial search engine (it stores
   these values in its dictionary).  It is used by the dynamic help
   facility. */
{
  *lines = theHit->number_of_lines;

  /* find the length of the document */
  if(theHit->end_character != 0)
    {
      /* document is not the whole file, so the size is stored in the hit */
      *bytes = theHit->end_character - theHit->start_character;
      return(true);
    }
  else
    {	
      /* whole file: find the file length by seeking to its end */
      FILE* file = NULL;
      if (((file = s_fopen(theHit->filename, "r")) != NULL) &&
	  (s_fseek(file, 0L, SEEK_END) == 0)  &&
	  ((*bytes = ftell(file)) != -1))
	{ s_fclose(file);
	  return(true);		/* we are done, bytes is set */
	}
      else
	{ /* only close the stream if it was actually opened; the old
	     code passed NULL to s_fclose when s_fopen failed */
	  if (file != NULL)
	    s_fclose(file);
	  return(false);	/* something went wrong with the file */
	}
    }
}

static long search_word_before_pairs _AP((char *word, long char_pos,
				       long line_pos, long weight,
				       long doc_id, time_t date,
				       boolean capitalized, database* db));

/* returns 0 if successful, non-0 if error.  A copy of add_word_before_pairs
   from the indexer, adapted to searching: besides looking up each word on
   its own, two consecutive capitalized words in the same document are also
   looked up as a joint word (word pair). */
static long search_word_before_pairs (word, char_pos, line_pos,
				      weight, doc_id, date, capitalized, db)
     char *word;	/* the word to be searched, this could be a
			   word pair. If NULL there are no more words
			   to be searched */
     long char_pos;	/* the position of the start of the
			   word */
     long line_pos;	/* this is passed for the best
			   section calculation */
     long weight;	/* how important the word looks
			   syntactically (such as is it bold)
			   NOT used by signature system */
     long doc_id; 	/* current document, this will never be 0 */
     time_t date; /* display day of this document, 0 if not known */
     boolean capitalized; /* if the word started with a cap */
     database* db; /* database to insert the document */
{
  /* static buffers carry state from one call to the next; note that
     last_word[MAX_WORD_LENGTH] is '\0' from static initialization and is
     never written by the strncpy below, so last_word stays terminated. */
  static char last_word[MAX_WORD_LENGTH + 1];
  static long last_doc_id = -1;
  /* The way it works is it remembers the last word if it was
     capitalized (if not it clears the saved word).  
     If another capitalized word comes along next
     (and it is in the same document), then it makes a joint word and calls 
     search_word with it. */
  if(capitalized){
    if(last_word[0] != '\0' && last_doc_id == doc_id){
      search_word(make_joint_word(last_word, word), 
		  char_pos, line_pos, weight, doc_id, 1L, db);
    }
    else{
      last_word[0] = '\0';
    }
    strncpy(last_word, word, MAX_WORD_LENGTH);
    last_doc_id = doc_id;
  }
  else{ /* not capitalized */
    last_word[0] = '\0';
  }
  return(search_word(word, char_pos, 
		     line_pos, weight, doc_id, 0L, db));
}

long count_trie_words;
long count_uniq;

/* Tokenize the string WORDS and insert each (encoded) word into the trie
   SET, counting occurrences in the per-word datum.  Updates the globals
   count_trie_words (total tokens seen) and count_uniq (distinct words).
   Always returns true; panics on encode/trie failure. */
boolean prepare_word_list(words,set,alloc)
     char* words;
     trie* set;
     trie_allocator* alloc;
{
  char* word = NULL;
  int * datum;

  count_trie_words = count_uniq = 0;

  word = (char*)strtokf_isalnum(words);
  while(word != NULL){
    /* trim the token if it exceeds the dictionary's word size */
    if(strlen(word) > MAX_WORD_LENGTH){
      word[MAX_WORD_LENGTH] = '\0';
    }
    if(!encode((unsigned char*)word)) {
      panic("can't encode word %s",word);
    }
    datum = (int *)trie_lookup(word,set,alloc);
    if(!datum) {
      panic("trie_lookup failed !!!");
    }

    count_trie_words++;
    *datum += 1;		/* occurrence count for this word */
    if (*datum == 1) {
      count_uniq++;		/* first time we have seen this word */
    }
    word = (char *)strtokf_isalnum(NULL);
    beFriendly();		/* yield time to other processes */
  } 

  /* counters are long, so use %ld (the old %d mismatched on LP64) */
  waislog(WLOG_LOW, WLOG_INFO,
	  "after preparing word list, %ld search items were presented.",
	  count_trie_words);
  waislog(WLOG_LOW, WLOG_INFO, 
	  "There are %ld words to search for.",
	  count_uniq);

  return(true);
}

/* Recursively walk the trie DICT, reconstructing each stored word from
   PREFIX plus the node strings, and run search_word on the decoded word.
   RESULT accumulates the boolean success of the searches and is returned.
   (The old version declared `char* tmp=word;` which read `word` before it
   was initialized -- undefined behavior -- and several unused locals;
   all removed.) */
boolean search_for_trie_words(dict,db,prefix,docid,result)
trie* dict;
database* db;
char* prefix;
long docid;
boolean result;
{
  char buffer[MAX_WORD_LENGTH+1];
  char tmp_word[MAX_WORD_LENGTH+1];
  char* word;

  if (dict == NULL) {
    return result;
  }

  /* word = prefix plus this node's string fragment */
  if (*dict->string) {
    strcpy(buffer,prefix);
    strcat(buffer,dict->string);
    word = buffer;
  } else {
    word = prefix;
  }

  if (dict->datum) {
    /* this node holds a complete word: decode it and search */
    strcpy(tmp_word,word);
    decode(tmp_word);
    result |= search_word(tmp_word,0L,0L,1L,docid,0L,db);
  }
  if (dict->table) {
    int i;
    int len;
    len = strlen(word);
    for (i=0;i<ALPHA_SIZE;i++) {
      if(dict->table[i]) {
	/* extend the word in place with this branch's character */
	word[len]=(char)i;
	word[len+1]='\0';
	result = search_for_trie_words(dict->table[i],db,word,docid,result);
      }
    }
  }
  return result;
}

/* Break the string WORDS into words (using map_over_words) and feed
   each one to search_word_before_pairs().  Returns true if successful. */
boolean search_for_words(words, db, doc_id)
     char* words;
     database *db;
     long doc_id;
{
#ifdef BOOL
  /* LISP QUERY */
  if( words[0] == '(' ){ /* then it is a lisp query */
    /* this is a temporary stub for the real work */
    char error_string[ERROR_STRING_LEN];
    object* query = (object*)parseQuery(words,QUERY_SYNTAX_LISP,error_string);
    if(query == NULL){
      waislog(WLOG_HIGH, WLOG_ERROR, "Unparsable query %s", error_string);
      return(false);
    }
    query = (object*)send(query,Evaluate,db);
    return(true);
  }
#endif  /* def BOOL */

  /* NORMAL QUERY: succeed unless map_over_words reports an error */
  return((-1 == map_over_words(words, doc_id, 1L, 0L, NULL, NULL, db,
			       (wordfunc*)search_word_before_pairs, 0L, 0L))
	 ? false : true);
}

/* Gets the next best hit from the search engine and fills in all the slots.
   If the document's file no longer exists, it moves on to the next one, etc.
   Returns 0 if successful, non-zero when out of hits or on engine error. */   
long next_best_hit(the_best_hit, db)
     hit *the_best_hit;
     database *db;
{
  document_table_entry doc_entry;
  long ret_value;
  while(1){ /* keep going until we get a good document */
    if(0 != (ret_value = best_hit(db,&(the_best_hit->document_id),
				  &(the_best_hit->best_character),
				  &(the_best_hit->best_line),
				  &(the_best_hit->weight))))
      return(ret_value);
    if(the_best_hit->weight <= 0)	/* if we are out of good stuff, return */
      return(1);
    /* fill in the rest of the hit from the document table */
    if (read_document_table_entry(&doc_entry,
				  the_best_hit->document_id,
				  db) 
	== true){
      the_best_hit->start_character = doc_entry.start_character;
      the_best_hit->end_character = doc_entry.end_character;
      the_best_hit->document_length = doc_entry.document_length;
      the_best_hit->number_of_lines = doc_entry.number_of_lines;
      sprintf(the_best_hit->date, "%d", doc_entry.date);
      read_filename_table_entry(doc_entry.filename_id, 
				the_best_hit->filename,
				the_best_hit->type,
				NULL,
				db);	/* was a stray comma operator */
      strncpy(the_best_hit->headline, 
	      read_headline_table_entry(doc_entry.headline_id,db),
	      MAX_HEADLINE_LEN);
      /* strncpy does not terminate a max-length copy; assumes headline[]
	 holds MAX_HEADLINE_LEN+1 bytes -- TODO confirm in irsearch.h */
      the_best_hit->headline[MAX_HEADLINE_LEN] = '\0';
      if(probe_file_possibly_compressed(the_best_hit->filename))
	return(0);  /* we win */
      else { /* we lose: the indexed file is gone from disk */
	waislog(WLOG_HIGH, WLOG_WARNING, 
		"Dangling File %s in database %s.", 
		the_best_hit->filename,
		db->database_file);
      }
    }
    else {
      waislog(WLOG_HIGH, WLOG_ERROR, 
	      "Error reading doc_table_entry for database %s, docid: %ld",
	      db->database_file,
	      the_best_hit->document_id);
    }
    beFriendly();	/* yield time to other processes */
  }
}

/*----------------------------------------------------------------------*/

/* this function figures out if the request is for a NEXT or Previous document.
   If it is, then it makes a header for it and returns it.  If not, then it 
   returns NULL. */

WAISDocumentHeader*
handle_next_and_previous(docs, db, waisProtocolVersion, server)
DocObj** docs;
database* db;
long waisProtocolVersion;
char* server;
{
  char* dbName = db->database_file;
  WAISDocumentHeader* header;
  DocID* theDocID = NULL;
  char *local_id;

  if(docs != NULL) { /* All of this is for WAIS_Prev and WAIS_next */
    if(docs[0] != NULL && docs[0]->Type != NULL) {
      long id = -1;

      if((theDocID = docIDFromAny(docs[0]->DocumentID)) == NULL) {
	waislog(WLOG_HIGH, WLOG_WARNING, "can't parse docid");
	return(NULL);
      }

      local_id = anyToString(GetLocalID(theDocID));

      if(strcmp(docs[0]->Type,"WAIS_NEXT") == 0)
	id = next_docid(local_id,db);
      else if(strcmp(docs[0]->Type,"WAIS_PREV") == 0)
	id = previous_docid(local_id, db);

      freeDocID(theDocID); s_free(local_id);

      if (id > -1) {
	document_table_entry doc_entry;
	hit foo;
	long lines,length;
	char local_id[MAX_FILENAME_LEN + 60]; /* filename, start, end */

	local_id[0] = '\0';

	if (read_document_table_entry(&doc_entry, id, db) == true) {
	  foo.start_character = doc_entry.start_character;
	  foo.end_character = doc_entry.end_character;
	  foo.document_length = doc_entry.document_length;
	  foo.number_of_lines = doc_entry.number_of_lines;

	  read_filename_table_entry(doc_entry.filename_id, 
				    foo.filename,
				    foo.type,
				    NULL,
				    db),
	  strncpy(foo.headline, 
		  read_headline_table_entry(doc_entry.headline_id,db),
		  MAX_HEADLINE_LEN);
	  sprintf(foo.date, "%d", doc_entry.date);
	  sprintf(local_id, "%ld %ld %s", 
		  doc_entry.start_character,
		  doc_entry.end_character,
		  foo.filename);
		
	  if(calcDocLength(&(foo),&lines,&length)){
	    /* this document is good, return it */
	    char** type = NULL;
		
	    if (waisProtocolVersion >= '2'){
	      type = (char**)s_malloc((size_t)(sizeof(char*) * 2));
	      type[0] = s_strdup(foo.type);
	      type[1] = NULL;
	    }
	    else
	      type = NULL;
	       
	    theDocID = makeDocID();

	    theDocID->distributorServer = stringToAny(server); 
	    theDocID->originalServer = stringToAny(server);	
	    theDocID->distributorDatabase = stringToAny(dbName);
	    theDocID->originalDatabase = stringToAny(dbName);
	    theDocID->distributorLocalID = stringToAny(local_id);
	    theDocID->originalLocalID = stringToAny(local_id);

	    header=
	      makeWAISDocumentHeader(anyFromDocID(theDocID),
				     UNUSED,
				     -1L,
				     UNUSED,
				     length,lines,
				     type,
				     s_strdup(dbName),
				     s_strdup(foo.date),
				     s_strdup(foo.headline),
				     NULL);
	    freeDocID(theDocID);
	    return(header);
	  }
	  else{ 
	    waislog(WLOG_HIGH, WLOG_WARNING, 
		    "document <%ld %ld %s> skipped.",
		    doc_entry.start_character,
		    doc_entry.end_character,
		    foo.filename);
	    return(NULL);
	  }
	}
      }
    }
  }
  return(NULL);
}

/*----------------------------------------------------------------------*/
/* search for each of the words in a document, up to a limit.
   this is for relevance feedback. */

#define MAX_TEXT_SIZE 100000	/* Maximume size of relevant text */

/* Search for each of the words in a relevant document, for relevance
   feedback.  Returns true if it added the words to the search, false
   otherwise (not necessarily an error).  When the document cannot be
   retrieved, a diagnostic record is appended to *diags and *num_diags
   is incremented. */
boolean search_for_words_in_document(doc, docid, db, diags, num_diags)
DocObj* doc;
long docid;
database* db;
diagnosticRecord*** diags;  /* list of diagnostics */
long *num_diags;
{
  long errorCode;
  WAISDocumentText* doctext;
  char prefix[MAX_WORD_LENGTH+1];

  count_trie_words = 0;
  count_uniq = 0;
  *prefix = 0;

  if(doc->Type == NULL ||
     substrcmp(doc->Type,"TEXT") ||
     strcmp(doc->Type,"WSRC") == 0 ||
     strcmp(doc->Type,"WCAT") == 0 ||
     doc->Type[0] == 0) {

    doctext = NULL;
    if (doc->ChunkCode == CT_line)
      doctext = getDocumentText(doc, &errorCode, NULL);
    else if ((doc->ChunkCode == CT_byte) ||
	     (doc->ChunkCode == CT_document))
      doctext = getData(doc, &errorCode, NULL);
    if (doctext != NULL) {

      boolean search_result;
      /* build the trie only now that we have text; the old code created
	 the allocator up front and leaked it on every failure path */
      trie_allocator* alloc = make_trie_allocator();
      trie* the_dict = new_trie("",alloc);

      /* cap the amount of relevant text we will process */
      if(doctext->DocumentText->size > MAX_TEXT_SIZE)
	doctext->DocumentText->bytes[MAX_TEXT_SIZE] = 0;
      search_result = prepare_word_list(doctext->DocumentText->bytes,the_dict,alloc);  
      search_result |= search_for_trie_words(the_dict,db,prefix,docid,search_result);
      dispose_trie_allocator(alloc);

      freeWAISDocumentText(doctext);
      return(search_result);
    }
    else { /* bad docid? report it to the client as a diagnostic */
      DocID* theDocID = NULL;
      char* local_id = NULL;
      diagnosticRecord* diag = NULL;
      char msg[MAX_FILENAME_LEN * 2];

      theDocID = docIDFromAny(doc->DocumentID);
      
      if(theDocID == NULL) {
	local_id = s_strdup("can't parse docid");
      }
      else {
	local_id = anyToString(GetLocalID(theDocID));
  
	freeDocID(theDocID);
      }
      waislog(WLOG_HIGH, WLOG_WARNING,
	      "Relevance Feedback with invalid doc-id: '%s'",
	      local_id);
      strncpy(msg,"Relevant Document not available: ",
	      MAX_FILENAME_LEN);
      s_strncat(msg,local_id,MAX_FILENAME_LEN,MAX_FILENAME_LEN);
      s_free(local_id);
      (*num_diags)++;
      diag = makeDiag(true,D_TemporarySystemError,msg);
      *diags = (diagnosticRecord**)s_realloc(*diags,(size_t)(sizeof(diagnosticRecord*) * *num_diags));
      (*diags)[(*num_diags)-1] = diag;
    }

  }
  return(false);
}


/*----------------------------------------------------------------------*/

/* Convert a hit into a WAIS document header, normalizing its raw score
   against maxRawScore (the score of the best hit).  Returns NULL (after
   logging) when the document's length cannot be determined. */
WAISDocumentHeader*
best_hit_to_header(best_hit, maxRawScore, waisProtocolVersion, server, db)
hit* best_hit;
long maxRawScore;
long waisProtocolVersion;
char *server;
database* db;
{
  long lines,length;
  DocID* theDocID = NULL;
  WAISDocumentHeader* header;
  char* originName = db->database_file;
  char local_id[MAX_FILENAME_LEN + 60]; /* filename, start, end */
  local_id[0] = '\0';

  if (true == calcDocLength(best_hit,&lines,&length))
    {				/* this document is good, return it */
      char** type = NULL;
      long normalScore;
      if ( _BE_normalized )
         normalScore = best_hit->weight; /* front end does the normalizing */
      else if ( maxRawScore <= 0 )
         normalScore = 0;	/* guard: avoid dividing by zero below */
      else {
         normalScore = (long)floor(
				(((double)best_hit->weight) /
				 ((double)maxRawScore)) *	
				(MAX_NORMAL_SCORE + 1));
      
        if (normalScore > MAX_NORMAL_SCORE)
  	   normalScore = MAX_NORMAL_SCORE;
      }

      sprintf(local_id, "%ld %ld %s", 
	      best_hit->start_character,
	      best_hit->end_character,
	      best_hit->filename);
         
      if (waisProtocolVersion >= '2') {
	/* NULL-terminated one-element type list for v2 clients */
	type = (char**)s_malloc((size_t)(sizeof(char*) * 2));
	type[0] = s_strdup(best_hit->type);
	type[1] = NULL;
      }
      else
	type = NULL;

      theDocID = makeDocID();

      theDocID->distributorServer = stringToAny(server);
      theDocID->originalServer = stringToAny(server);
	      
      theDocID->distributorDatabase = stringToAny(originName);
      theDocID->originalDatabase = stringToAny(originName);
		  
      theDocID->distributorLocalID = stringToAny(local_id);
      theDocID->originalLocalID = stringToAny(local_id);

      header =
	makeWAISDocumentHeader(anyFromDocID(theDocID),
			       UNUSED,
			       (long)normalScore,
			       best_hit->best_line,
			       length,lines,
			       type,
			       s_strdup(originName),
			       s_strdup(best_hit->date),
			       s_strdup(best_hit->headline),
			       NULL);
      freeDocID(theDocID);
      return(header);
    }
  else
    { 
      waislog(WLOG_HIGH, WLOG_WARNING, 
	      "document <%ld %ld %s> skipped.",
	      best_hit->start_character,
	      best_hit->end_character,
	      best_hit->filename);
      return(NULL);
    }
}



/*----------------------------------------------------------------------*/

boolean run_search(aSearch, headers, diags, index_directory, 
		   seed_words_used, waisProtocolVersion, headerNum)
SearchAPDU* aSearch;
WAISDocumentHeader** headers; /* list of results */
diagnosticRecord*** diags;  /* list of diagnostics */
char *index_directory;
char **seed_words_used;  /* called with enough space */
long waisProtocolVersion;
long *headerNum;
/* runs a search on the inverted file index and returns false if it errors 
   in such a way that it can not even make a diagnostic record 
   (should not happen).
   It changes headers with the replies or makes a diagnostic record
 */
{ 
  diagnosticRecord* diag = NULL;
  WAISSearch* wais_search = (WAISSearch*)aSearch->Query; /* for convenience */
  database* db = NULL;
  long maxRawScore = 0;	/* set from the first (best) hit below */
  long i;
  query_parameter_type parameters;
  boolean search_result;
  char server[255];
  WAISDocumentHeader* header;
  long num_diags = 0;
  char dbName[MAX_FILENAME_LEN * 2];

  if (aSearch->DatabaseNames == NULL)
    strcpy(dbName,merge_pathnames(INFO_DATABASE_NAME, index_directory));
  else
    strcpy(dbName,merge_pathnames(aSearch->DatabaseNames[0], index_directory));

#ifdef GET_QUERY_TIMING
  ftime(&s_time);
#endif

  /* strip .src if it is on the name */
  if(strlen(dbName) > strlen(".src"))
    if(0 == strcmp(dbName + strlen(dbName) - strlen(".src"),
		   ".src"))
      dbName[strlen(dbName) - strlen(".src")] = '\0';
  
  if(server_name != NULL)
    sprintf(server, "%s:%d", server_name, tcp_port);
  else
    sprintf(server, "localhost:0");

  db = openDatabase(dbName, false, true);
  if (db == NULL){
    /* can't open the database: report it and give up */
    char msg[MAX_FILENAME_LEN * 2];
    strncpy(msg,"The following database is not available: ",
	    MAX_FILENAME_LEN);
    s_strncat(msg,dbName,MAX_FILENAME_LEN,MAX_FILENAME_LEN);
    diag = makeDiag(false,D_PermanentSystemError,msg);
    *diags = (diagnosticRecord **)s_realloc(*diags,(size_t)(sizeof(diagnosticRecord*) * 2));
    (*diags)[0] = diag;
    (*diags)[1] = NULL;
    return(false);
  }

  /* figure out if it is a NEXT or PREVIOUS, if so, return it. */
  header = handle_next_and_previous(wais_search->Docs, db, 
				    waisProtocolVersion, server);
  if(header != NULL){
    headers[(*headerNum)++] = header;
    headers[*headerNum] = NULL;
    closeDatabase(db);	/* the old code left the database open here */
    return(true);
  }
  
  /* until seed_words_used is supported */
  strcpy(*seed_words_used, wais_search->SeedWords);

  parameters.max_hit_retrieved = wais_search->MaxDocumentsRetrieved;
  set_query_parameter(SET_MAX_RETRIEVED_MASK, &parameters);

  search_result = false;
  init_search_word(db);

#ifdef RELEVANCE_FEEDBACK
  if(wais_search->Docs != NULL) {
    DocObj* doc = NULL;
    boolean res;
    /* feed each relevant document's words into the search */
    for (i = 0, doc = wais_search->Docs[i]; 
	 doc != NULL; 
	 doc = wais_search->Docs[++i]){
      search_result |= 
	search_for_words_in_document(doc,i+1,db,diags,&num_diags);
    }
    if (*diags != NULL) {
	/* NULL-terminate the diagnostic list */
	num_diags++;
	*diags = (diagnosticRecord**)s_realloc(*diags,(size_t)(sizeof(diagnosticRecord*) * num_diags));
	(*diags)[num_diags-1] = NULL;
      }
  }
#endif				/* RELEVANCE_FEEDBACK */

  search_result |= search_for_words(wais_search->SeedWords, db, 0);

  if (search_result == true){ /* the search went ok */
      hit best_hit;
      finished_search_word(db);
      init_best_hit(db);
      for (i = 0; i < wais_search->MaxDocumentsRetrieved; i++){ 
	if(0 != next_best_hit(&best_hit, db))
	  break;		/* out of hits */
	if(i == 0)
	  maxRawScore = best_hit.weight; /* first hit has the top score */
	if (best_hit.weight > 0){
	  WAISDocumentHeader* header = 
	    best_hit_to_header(&best_hit, maxRawScore,
			       waisProtocolVersion,server,db);
	  if(NULL != header){
	    headers[(*headerNum)++] = header;
	    headers[*headerNum] = NULL;
	  }
	}
      }
    }
  else
    {				/* something went awry in the search */
      /* append a diagnostic plus a NULL terminator.  With no prior
	 diagnostics we need room for both entries; the old code then
	 indexed (*diags)[-1].  Otherwise reuse the existing NULL slot. */
      if (num_diags == 0)
	num_diags = 2;
      else
	num_diags++;
      diag = makeDiag(true,D_PermanentSystemError,
		      "Serious error in server");
      *diags = (diagnosticRecord**)
	s_realloc(*diags, (size_t)(sizeof(diagnosticRecord*) * num_diags));
      (*diags)[num_diags-2] = diag;
      (*diags)[num_diags-1] = NULL;
    }
  finished_best_hit(db);
  /* free everything */
  closeDatabase(db);
#ifdef GET_QUERY_TIMING
  ftime(&e_time);
  t_time += (e_time.time + e_time.millitm/1000.0) - 
            (s_time.time + s_time.millitm/1000.0);
  n_query++;
  if ( n_query == 200 ) {
   waislog(WLOG_LOW, WLOG_INFO, "searching 200 queries takes %f seconds.",
           t_time);
   waislog(WLOG_LOW, WLOG_INFO, "average %f/query.", t_time/200.0);
   n_query = 0;
   t_time = 0;
   }
#endif

  return(true);
}

These are the contents of the former NiCE NeXT User Group NeXTSTEP/OpenStep software archive, currently hosted by Netfuture.ch.