From a37db4c56468bc7f41e14e75acf6f4c266eafd4c Mon Sep 17 00:00:00 2001 From: Julien Rouhaud Date: Tue, 25 Oct 2022 15:26:50 +0900 Subject: [PATCH v14 4/5] Allow file inclusion in pg_hba and pg_ident files. The pg_hba.conf file now has support for "include", "include_dir" and "include_if_exists" directives, which work similarly to the same directives in the postgresql.conf file. This also fixes a possible crash if a secondary file tries to include itself, as there is now a nesting depth check in the inclusion code path, the same as for postgresql.conf. Many regression tests are added to cover both the new directives and error detection for the pg_hba / pg_ident files as a whole. Catversion is bumped. Author: Julien Rouhaud Reviewed-by: FIXME Discussion: https://postgr.es/m/20220223045959.35ipdsvbxcstrhya%40jrouhaud --- doc/src/sgml/client-auth.sgml | 86 ++- doc/src/sgml/system-views.sgml | 22 +- src/backend/libpq/hba.c | 374 ++++++++-- src/backend/libpq/pg_hba.conf.sample | 25 +- src/backend/libpq/pg_ident.conf.sample | 15 +- src/backend/utils/adt/hbafuncs.c | 20 +- src/backend/utils/misc/guc-file.l | 229 +++--- src/include/catalog/pg_proc.dat | 12 +- src/include/libpq/hba.h | 3 +- src/include/utils/guc.h | 2 + .../authentication/t/003_file_inclusion.pl | 657 ++++++++++++++++++ src/test/regress/expected/rules.out | 6 +- 12 files changed, 1239 insertions(+), 212 deletions(-) create mode 100644 src/test/authentication/t/003_file_inclusion.pl diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml index 32d5d45863..2ae723de66 100644 --- a/doc/src/sgml/client-auth.sgml +++ b/doc/src/sgml/client-auth.sgml @@ -89,8 +89,23 @@ - Each record specifies a connection type, a client IP address range - (if relevant for the connection type), a database name, a user name, + Each record can either be an inclusion directive or an authentication + record. Inclusion directives specify files that can be included, which + contain additional records. The records will be inserted in lieu of the + inclusion directives. Inclusion directives contain only two fields: the + include, include_if_exists or + include_dir directive and the file or directory to be + included. The file or directory can be a relative or absolute path, and can + be double quoted if needed. For the include_dir form, + all files not starting with a . and ending with + .conf will be included. Multiple files within an include + directory are processed in file name order (according to C locale rules, + i.e., numbers before letters, and uppercase letters before lowercase ones). + + + + Each authentication record specifies a connection type, a client IP address + range (if relevant for the connection type), a database name, a user name, and the authentication method to be used for connections matching these parameters. 
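As an illustration of the directives described above (the file and directory names here are hypothetical, not part of the patch), a pg_hba.conf could be laid out like this:

    # main pg_hba.conf
    local   all   all                     trust
    include extra_hba.conf                # must exist; a relative path is taken relative to this file's directory
    include_if_exists optional_hba.conf   # skipped, with a log message, if the file is missing
    include_dir hba.d                     # reads hba.d/*.conf in C-locale file name order

Records read from extra_hba.conf are inserted at the position of the include line, so they are matched before anything pulled in by the later include_dir directive.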
The first record with a matching connection type, client address, requested database, and user name is used to perform @@ -103,21 +118,57 @@ A record can have several formats: -local database user auth-method auth-options -host database user address auth-method auth-options -hostssl database user address auth-method auth-options -hostnossl database user address auth-method auth-options -hostgssenc database user address auth-method auth-options -hostnogssenc database user address auth-method auth-options -host database user IP-address IP-mask auth-method auth-options -hostssl database user IP-address IP-mask auth-method auth-options -hostnossl database user IP-address IP-mask auth-method auth-options -hostgssenc database user IP-address IP-mask auth-method auth-options -hostnogssenc database user IP-address IP-mask auth-method auth-options +include file +include_if_exists file +include_dir directory +local database user auth-method auth-options +host database user address auth-method auth-options +hostssl database user address auth-method auth-options +hostnossl database user address auth-method auth-options +hostgssenc database user address auth-method auth-options +hostnogssenc database user address auth-method auth-options +host database user IP-address IP-mask auth-method auth-options +hostssl database user IP-address IP-mask auth-method auth-options +hostnossl database user IP-address IP-mask auth-method auth-options +hostgssenc database user IP-address IP-mask auth-method auth-options +hostnogssenc database user IP-address IP-mask auth-method auth-options The meaning of the fields is as follows: + + include + + + This line will be replaced with the content of the given file. + + + + + + include_if_exists + + + This line will be replaced with the content of the given file if the + file exists and can be read. Otherwise, a message will be logged to + indicate that the file is skipped. + + + + + + include_dir + + + This line will be replaced with the content of all the files found in + the directory, if they don't start with a . and end + with .conf, processed in file name order (according + to C locale rules, i.e., numbers before letters, and uppercase letters + before lowercase ones). + + + + + local @@ -863,8 +914,10 @@ local db1,db2,@demodbs all md5 cluster's data directory. (It is possible to place the map file elsewhere, however; see the configuration parameter.) - The ident map file contains lines of the general form: + The ident map file contains lines of two general forms: +include file +include_dir directory map-name system-username database-username Comments, whitespace and line continuations are handled in the same way as in @@ -875,6 +928,11 @@ local db1,db2,@demodbs all md5 database user name. The same map-name can be used repeatedly to specify multiple user-mappings within a single map. + + As with pg_hba.conf, the lines in this file can be either + inclusion directives or user name map records, and they follow the same + rules. + There is no restriction regarding how many database users a given operating system user can correspond to, nor vice versa. 
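Analogously, a user name map file can mix inclusion directives and map records; a hypothetical pg_ident.conf (names invented for illustration) could look like:

    # pg_ident.conf
    include extra_ident.conf       # map records from that file are inserted here
    mymap   operatorjoe   postgres
    mymap   operatorann   postgres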
Thus, entries diff --git a/doc/src/sgml/system-views.sgml b/doc/src/sgml/system-views.sgml index 4723f712a7..7d1cec8b7f 100644 --- a/doc/src/sgml/system-views.sgml +++ b/doc/src/sgml/system-views.sgml @@ -1003,12 +1003,21 @@ + + + file_name text + + + Name of the file containing this rule + + + line_number int4 - Line number of this rule in pg_hba.conf + Line number of this rule the given file_name @@ -1153,12 +1162,21 @@ + + + file_name text + + + Name of the file containing this mapping + + + line_number int4 - Line number of this rule in pg_ident.conf + Line number of this mapping in the given file_name diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index 56bbe31dfd..74f46ffb04 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -70,6 +71,12 @@ typedef struct check_network_data #define token_is_keyword(t, k) (!t->quoted && strcmp(t->string, k) == 0) #define token_matches(t, k) (strcmp(t->string, k) == 0) +typedef enum HbaIncludeKind +{ + SecondaryAuthFile, + IncludedAuthFile +} HbaIncludeKind; + /* * pre-parsed content of HBA config file: list of HbaLine structs. * parsed_hba_context is the memory context where it lives. @@ -115,14 +122,26 @@ static const char *const UserAuthName[] = }; +static void tokenize_file_with_context(MemoryContext linecxt, + const char *filename, FILE *file, + List **tok_lines, int depth, + int elevel); static List *tokenize_inc_file(List *tokens, const char *outer_filename, - const char *inc_filename, int elevel, char **err_msg); + const char *inc_filename, int depth, int elevel, + char **err_msg); static bool parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int elevel, char **err_msg); static int regcomp_auth_token(AuthToken *token, char *filename, int line_num, char **err_msg, int elevel); static int regexec_auth_token(const char *match, AuthToken *token, size_t nmatch, regmatch_t pmatch[]); +static FILE *open_inc_file(HbaIncludeKind kind, const char *inc_filename, + bool strict, const char *outer_filename, int elevel, + char **err_msg, char **inc_fullname); +static char *process_included_authfile(const char *inc_filename, bool strict, + const char *outer_filename, int depth, + int elevel, MemoryContext linecxt, + List **tok_lines); /* @@ -413,7 +432,7 @@ regexec_auth_token(const char *match, AuthToken *token, size_t nmatch, */ static List * next_field_expand(const char *filename, char **lineptr, - int elevel, char **err_msg) + int depth, int elevel, char **err_msg) { char buf[MAX_TOKEN]; bool trailing_comma; @@ -429,7 +448,7 @@ next_field_expand(const char *filename, char **lineptr, /* Is this referencing a file? */ if (!initial_quote && buf[0] == '@' && buf[1] != '\0') - tokens = tokenize_inc_file(tokens, filename, buf + 1, + tokens = tokenize_inc_file(tokens, filename, buf + 1, depth + 1, elevel, err_msg); else tokens = lappend(tokens, make_auth_token(buf, initial_quote)); @@ -457,6 +476,7 @@ static List * tokenize_inc_file(List *tokens, const char *outer_filename, const char *inc_filename, + int depth, int elevel, char **err_msg) { @@ -466,39 +486,30 @@ tokenize_inc_file(List *tokens, ListCell *inc_line; MemoryContext linecxt; - if (is_absolute_path(inc_filename)) - { - /* absolute path is taken as-is */ - inc_fullname = pstrdup(inc_filename); - } - else + /* + * Reject too-deep include nesting depth. This is just a safety check to + * avoid dumping core due to stack overflow if an include file loops back + * to itself. 
The maximum nesting depth is pretty arbitrary. + */ + if (depth > 10) { - /* relative path is relative to dir of calling file */ - inc_fullname = (char *) palloc(strlen(outer_filename) + 1 + - strlen(inc_filename) + 1); - strcpy(inc_fullname, outer_filename); - get_parent_directory(inc_fullname); - join_path_components(inc_fullname, inc_fullname, inc_filename); - canonicalize_path(inc_fullname); + *err_msg = psprintf("could not open configuration file \"%s\": maximum nesting depth exceeded", + inc_filename); + ereport(elevel, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("%s", *err_msg))); + return tokens; } - inc_file = AllocateFile(inc_fullname, "r"); - if (inc_file == NULL) - { - int save_errno = errno; + inc_file = open_inc_file(SecondaryAuthFile, inc_filename, true, + outer_filename, elevel, err_msg, &inc_fullname); - ereport(elevel, - (errcode_for_file_access(), - errmsg("could not open secondary authentication file \"@%s\" as \"%s\": %m", - inc_filename, inc_fullname))); - *err_msg = psprintf("could not open secondary authentication file \"@%s\" as \"%s\": %s", - inc_filename, inc_fullname, strerror(save_errno)); - pfree(inc_fullname); + if (inc_file == NULL) return tokens; - } /* There is possible recursion here if the file contains @ */ - linecxt = tokenize_auth_file(inc_fullname, inc_file, &inc_lines, elevel); + linecxt = tokenize_auth_file(inc_fullname, inc_file, &inc_lines, depth + 1, + elevel); FreeFile(inc_file); pfree(inc_fullname); @@ -536,11 +547,38 @@ tokenize_inc_file(List *tokens, /* * tokenize_auth_file - * Tokenize the given file. + * + * Wrapper around tokenize_file_with_context, creating a dedicated memory + * context. + * + * Return value is this memory context which contains all memory allocated by + * this function (it's a child of caller's context). + */ +MemoryContext +tokenize_auth_file(const char *filename, FILE *file, List **tok_lines, + int depth, int elevel) +{ + MemoryContext linecxt; + linecxt = AllocSetContextCreate(CurrentMemoryContext, + "tokenize_auth_file", + ALLOCSET_SMALL_SIZES); + + *tok_lines = NIL; + + tokenize_file_with_context(linecxt, filename, file, tok_lines, depth, + elevel); + + return linecxt; +} + +/* + * Tokenize the given file. * * The output is a list of TokenizedAuthLine structs; see the struct definition * in libpq/hba.h. * + * linecxt: memory context which must contain all memory allocated by the + * function * filename: the absolute path to the target file * file: the already-opened target file * tok_lines: receives output list @@ -549,30 +587,22 @@ tokenize_inc_file(List *tokens, * Errors are reported by logging messages at ereport level elevel and by * adding TokenizedAuthLine structs containing non-null err_msg fields to the * output list. - * - * Return value is a memory context which contains all memory allocated by - * this function (it's a child of caller's context). 
*/ -MemoryContext -tokenize_auth_file(const char *filename, FILE *file, List **tok_lines, - int elevel) +static void +tokenize_file_with_context(MemoryContext linecxt, const char *filename, + FILE *file, List **tok_lines, int depth, int elevel) { - int line_number = 1; StringInfoData buf; - MemoryContext linecxt; + int line_number = 1; MemoryContext oldcxt; - linecxt = AllocSetContextCreate(CurrentMemoryContext, - "tokenize_auth_file", - ALLOCSET_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(linecxt); initStringInfo(&buf); - *tok_lines = NIL; - while (!feof(file) && !ferror(file)) { + TokenizedAuthLine *tok_line; char *lineptr; List *current_line = NIL; char *err_msg = NULL; @@ -625,7 +655,7 @@ tokenize_auth_file(const char *filename, FILE *file, List **tok_lines, { List *current_field; - current_field = next_field_expand(filename, &lineptr, + current_field = next_field_expand(filename, &lineptr, depth, elevel, &err_msg); /* add field to line, unless we are at EOL or comment start */ if (current_field != NIL) @@ -633,30 +663,127 @@ tokenize_auth_file(const char *filename, FILE *file, List **tok_lines, } /* - * Reached EOL; emit line to TokenizedAuthLine list unless it's boring + * Reached EOL; no need to emit line to TokenizedAuthLine list if it's + * boring. */ - if (current_line != NIL || err_msg != NULL) + if (current_line == NIL && err_msg == NULL) + goto next_line; + + /* If the line is valid, check if that's an include directive */ + if (err_msg == NULL && list_length(current_line) == 2) { - TokenizedAuthLine *tok_line; + AuthToken *first, *second; + + first = linitial(linitial_node(List, current_line)); + second = linitial(lsecond_node(List, current_line)); + + if (strcmp(first->string, "include") == 0) + { + char *inc_filename; + + inc_filename = second->string; + + err_msg = process_included_authfile(inc_filename, true, + filename, depth + 1, elevel, linecxt, + tok_lines); + + if (!err_msg) + { + /* + * The line is fully processed, bypass the general + * TokenizedAuthLine processing. + */ + goto next_line; + } + } + else if (strcmp(first->string, "include_dir") == 0) + { + char **filenames; + char *dir_name = second->string; + int num_filenames; + StringInfoData err_buf; + + filenames = GetDirConfFiles(dir_name, filename, elevel, + &num_filenames, &err_msg); + + if (!filenames) + { + /* We have the error in err_msg, simply process it */ + goto process_line; + } + + initStringInfo(&err_buf); + for (int i = 0; i < num_filenames; i++) + { + /* + * err_msg is used here as a temp buffer, it will be + * overwritten at the end of the loop with the + * cumulated errors, if any. + */ + err_msg = process_included_authfile(filenames[i], true, + filename, depth + 1, elevel, + linecxt, tok_lines); + + /* Cumulate errors if any. */ + if (err_msg) + { + if (err_buf.len > 0) + appendStringInfoChar(&err_buf, '\n'); + appendStringInfoString(&err_buf, err_msg); + } + } + + /* + * If there were no errors, the line is fully processed, bypass + * the general TokenizedAuthLine processing. + */ + if (err_buf.len == 0) + goto next_line; + + /* Otherwise, process the cumulated errors, if any. 
*/ + err_msg = err_buf.data; + } + else if (strcmp(first->string, "include_if_exists") == 0) + { + char *inc_filename; - tok_line = (TokenizedAuthLine *) palloc(sizeof(TokenizedAuthLine)); - tok_line->fields = current_line; - tok_line->file_name = pstrdup(filename); - tok_line->line_num = line_number; - tok_line->raw_line = pstrdup(buf.data); - tok_line->err_msg = err_msg; - *tok_lines = lappend(*tok_lines, tok_line); + inc_filename = second->string; + + err_msg = process_included_authfile(inc_filename, false, + filename, depth + 1, elevel, linecxt, + tok_lines); + + if (!err_msg) + { + /* + * The line is fully processed, bypass the general + * TokenizedAuthLine processing. + */ + goto next_line; + } + } } +process_line: + /* + * General processing: report the error if any and emit line to the + * TokenizedAuthLine + */ + tok_line = (TokenizedAuthLine *) palloc(sizeof(TokenizedAuthLine)); + tok_line->fields = current_line; + tok_line->file_name = pstrdup(filename); + tok_line->line_num = line_number; + tok_line->raw_line = pstrdup(buf.data); + tok_line->err_msg = err_msg; + *tok_lines = lappend(*tok_lines, tok_line); + +next_line: line_number += continuations + 1; } MemoryContextSwitchTo(oldcxt); - - return linecxt; } - /* * Does user belong to role? * @@ -2355,7 +2482,7 @@ load_hba(void) return false; } - linecxt = tokenize_auth_file(HbaFileName, file, &hba_lines, LOG); + linecxt = tokenize_auth_file(HbaFileName, file, &hba_lines, 0, LOG); FreeFile(file); /* Now parse all the lines */ @@ -2445,6 +2572,135 @@ load_hba(void) return true; } +/* + * Open the given file for inclusion in an authentication file, whether + * secondary or included. + */ +static FILE * +open_inc_file(HbaIncludeKind kind, const char *inc_filename, bool strict, + const char *outer_filename, int elevel, char **err_msg, + char **inc_fullname) +{ + FILE *inc_file; + + if (is_absolute_path(inc_filename)) + { + /* absolute path is taken as-is */ + *inc_fullname = pstrdup(inc_filename); + } + else + { + /* relative path is relative to dir of calling file */ + *inc_fullname = (char *) palloc(strlen(outer_filename) + 1 + + strlen(inc_filename) + 1); + strcpy(*inc_fullname, outer_filename); + get_parent_directory(*inc_fullname); + join_path_components(*inc_fullname, *inc_fullname, inc_filename); + canonicalize_path(*inc_fullname); + } + + inc_file = AllocateFile(*inc_fullname, "r"); + if (inc_file == NULL) + { + int save_errno = errno; + const char *msglog; + const char *msgview; + + if (strict) + { + switch (kind) + { + case SecondaryAuthFile: + msglog = "could not open secondary authentication file \"@%s\" as \"%s\": %m"; + msgview = "could not open secondary authentication file \"@%s\" as \"%s\": %s"; + break; + case IncludedAuthFile: + msglog = "could not open included authentication file \"%s\" as \"%s\": %m"; + msgview = "could not open included authentication file \"%s\" as \"%s\": %s"; + break; + default: + elog(ERROR, "unknown HbaIncludeKind: %d", kind); + break; + } + + ereport(elevel, + (errcode_for_file_access(), + errmsg(msglog, inc_filename, *inc_fullname))); + *err_msg = psprintf(msgview, inc_filename, *inc_fullname, + strerror(save_errno)); + } + else + { + Assert(kind == IncludedAuthFile); + ereport(LOG, + (errmsg("skipping missing authentication file \"%s\"", + *inc_fullname))); + } + + pfree(*inc_fullname); + *inc_fullname = NULL; + return NULL; + } + + return inc_file; +} + +/* + * Try to open an included file, and tokenize it using the given context. 
+ * Returns NULL if no error happens during tokenization, otherwise the error message. + */ +static char * +process_included_authfile(const char *inc_filename, bool strict, + const char *outer_filename, int depth, int elevel, + MemoryContext linecxt, List **tok_lines) +{ + char *inc_fullname; + FILE *inc_file; + char *err_msg = NULL; + + /* + * Reject too-deep include nesting depth. This is just a safety check to + * avoid dumping core due to stack overflow if an include file loops back + * to itself. The maximum nesting depth is pretty arbitrary. + */ + if (depth > 10) + { + err_msg = psprintf("could not open configuration file \"%s\": maximum nesting depth exceeded", + inc_filename); + ereport(elevel, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("%s", err_msg))); + return err_msg; + } + + inc_file = open_inc_file(IncludedAuthFile, inc_filename, strict, + outer_filename, elevel, &err_msg, &inc_fullname); + + if (inc_file == NULL) + { + if (strict) + { + /* open_inc_file should have reported an error. */ + Assert(err_msg != NULL); + return err_msg; + } + else + return NULL; + } + else + { + /* No error message should have been reported. */ + Assert(err_msg == NULL); + } + + tokenize_file_with_context(linecxt, inc_fullname, inc_file, + tok_lines, depth, elevel); + + FreeFile(inc_file); + pfree(inc_fullname); + + return NULL; +} /* * Parse one tokenised line from the ident config file and store the result in @@ -2728,7 +2984,7 @@ load_ident(void) return false; } - linecxt = tokenize_auth_file(IdentFileName, file, &ident_lines, LOG); + linecxt = tokenize_auth_file(IdentFileName, file, &ident_lines, 0, LOG); FreeFile(file); /* Now parse all the lines */ diff --git a/src/backend/libpq/pg_hba.conf.sample b/src/backend/libpq/pg_hba.conf.sample index 5f3f63eb0c..7433050112 100644 --- a/src/backend/libpq/pg_hba.conf.sample +++ b/src/backend/libpq/pg_hba.conf.sample @@ -9,16 +9,27 @@ # are authenticated, which PostgreSQL user names they can use, which # databases they can access. Records take one of these forms: # -# local DATABASE USER METHOD [OPTIONS] -# host DATABASE USER ADDRESS METHOD [OPTIONS] -# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] -# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] -# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] -# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# include FILE +# include_if_exists FILE +# include_dir DIRECTORY +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] # # (The uppercase items must be replaced by actual values.) # -# The first field is the connection type: +# If the first field is "include", "include_if_exists" or "include_dir", it's +# not an authentication record but a directive to include records from, +# respectively, another file, another file only if it exists, or all the +# files ending in '.conf' in the given directory. FILE is the file name to +# include, and DIRECTORY is the directory name containing the file(s) to +# include. FILE and DIRECTORY can be specified with a relative or absolute +# path, and can be double quoted if they contain spaces. 
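#
# As a hypothetical illustration (example names only, not part of the sample
# file): with "include_dir conf.d", files such as conf.d/01_replication.conf,
# conf.d/02_admin.conf and conf.d/10_applications.conf would be processed in
# that order, while conf.d/README and conf.d/.old.conf would be ignored
# (the former does not end in '.conf', the latter starts with a dot).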
+# +# Otherwise the first field is the connection type: # - "local" is a Unix-domain socket # - "host" is a TCP/IP socket (encrypted or not) # - "hostssl" is a TCP/IP socket that is SSL-encrypted diff --git a/src/backend/libpq/pg_ident.conf.sample b/src/backend/libpq/pg_ident.conf.sample index a5870e6448..8e3fa29135 100644 --- a/src/backend/libpq/pg_ident.conf.sample +++ b/src/backend/libpq/pg_ident.conf.sample @@ -7,12 +7,23 @@ # # This file controls PostgreSQL user name mapping. It maps external # user names to their corresponding PostgreSQL user names. Records -# are of the form: +# take one of these forms: # -# MAPNAME SYSTEM-USERNAME PG-USERNAME +# include FILE +# include_if_exists FILE +# include_dir DIRECTORY +# MAPNAME SYSTEM-USERNAME PG-USERNAME # # (The uppercase quantities must be replaced by actual values.) # +# If the first field is "include", "include_if_exists" or "include_dir", it's +# not a mapping record but a directive to include records from, respectively, +# another file, another file only if it exists, or all the files ending in +# '.conf' in the given directory. FILE is the file name to include, and +# DIRECTORY is the directory name containing the file(s) to include. FILE and +# DIRECTORY can be specified with a relative or absolute path, and can be +# double quoted if they contain spaces. +# # MAPNAME is the (otherwise freely chosen) map name that was used in # pg_hba.conf. SYSTEM-USERNAME is the detected user name of the # client. PG-USERNAME is the requested PostgreSQL user name. The diff --git a/src/backend/utils/adt/hbafuncs.c b/src/backend/utils/adt/hbafuncs.c index 255109ab07..d356804c5a 100644 --- a/src/backend/utils/adt/hbafuncs.c +++ b/src/backend/utils/adt/hbafuncs.c @@ -158,7 +158,7 @@ get_hba_options(HbaLine *hba) } /* Number of columns in pg_hba_file_rules view */ -#define NUM_PG_HBA_FILE_RULES_ATTS 10 +#define NUM_PG_HBA_FILE_RULES_ATTS 11 /* * fill_hba_line @@ -199,6 +199,8 @@ fill_hba_line(Tuplestorestate *tuple_store, TupleDesc tupdesc, nulls[index++] = true; else values[index++] = Int32GetDatum(rule_number); + /* file_name */ + values[index++] = CStringGetTextDatum(hba->sourcefile); /* line_number */ values[index++] = Int32GetDatum(hba->linenumber); @@ -342,7 +344,7 @@ fill_hba_line(Tuplestorestate *tuple_store, TupleDesc tupdesc, else { /* no parsing result, so set relevant fields to nulls */ - memset(&nulls[2], true, (NUM_PG_HBA_FILE_RULES_ATTS - 3) * sizeof(bool)); + memset(&nulls[3], true, (NUM_PG_HBA_FILE_RULES_ATTS - 4) * sizeof(bool)); } /* error */ @@ -383,7 +385,7 @@ fill_hba_view(Tuplestorestate *tuple_store, TupleDesc tupdesc) errmsg("could not open configuration file \"%s\": %m", HbaFileName))); - linecxt = tokenize_auth_file(HbaFileName, file, &hba_lines, DEBUG3); + linecxt = tokenize_auth_file(HbaFileName, file, &hba_lines, 0, DEBUG3); FreeFile(file); /* Now parse all the lines */ @@ -442,7 +444,7 @@ pg_hba_file_rules(PG_FUNCTION_ARGS) } /* Number of columns in pg_hba_file_mappings view */ -#define NUM_PG_IDENT_FILE_MAPPINGS_ATTS 6 +#define NUM_PG_IDENT_FILE_MAPPINGS_ATTS 7 /* * fill_ident_line: build one row of pg_ident_file_mappings view, add it to @@ -477,6 +479,8 @@ fill_ident_line(Tuplestorestate *tuple_store, TupleDesc tupdesc, nulls[index++] = true; else values[index++] = Int32GetDatum(mapping_number); + /* file_name */ + values[index++] = CStringGetTextDatum(ident->sourcefile); /* line_number */ values[index++] = Int32GetDatum(ident->linenumber); @@ -489,7 +493,7 @@ fill_ident_line(Tuplestorestate *tuple_store, TupleDesc tupdesc, else { /* 
no parsing result, so set relevant fields to nulls */ - memset(&nulls[2], true, (NUM_PG_IDENT_FILE_MAPPINGS_ATTS - 3) * sizeof(bool)); + memset(&nulls[3], true, (NUM_PG_IDENT_FILE_MAPPINGS_ATTS - 4) * sizeof(bool)); } /* error */ @@ -529,7 +533,7 @@ fill_ident_view(Tuplestorestate *tuple_store, TupleDesc tupdesc) errmsg("could not open usermap file \"%s\": %m", IdentFileName))); - linecxt = tokenize_auth_file(IdentFileName, file, &ident_lines, DEBUG3); + linecxt = tokenize_auth_file(IdentFileName, file, &ident_lines, 0, DEBUG3); FreeFile(file); /* Now parse all the lines */ @@ -550,8 +554,8 @@ fill_ident_view(Tuplestorestate *tuple_store, TupleDesc tupdesc) if (tok_line->err_msg == NULL) mapping_number++; - fill_ident_line(tuple_store, tupdesc, mapping_number, identline, - tok_line->err_msg); + fill_ident_line(tuple_store, tupdesc, mapping_number, + identline, tok_line->err_msg); } /* Free tokenizer memory */ diff --git a/src/backend/utils/misc/guc-file.l b/src/backend/utils/misc/guc-file.l index 721628c0cf..86b6cc1c8a 100644 --- a/src/backend/utils/misc/guc-file.l +++ b/src/backend/utils/misc/guc-file.l @@ -345,6 +345,110 @@ GUC_flex_fatal(const char *msg) return 0; /* keep compiler quiet */ } +/* + * Returns the list of config files located in a directory, in alphabetical + * order. + * + * We don't check for recursion or too-deep nesting depth here, its up to the + * caller to take care of that. + */ +char ** +GetDirConfFiles(const char *includedir, const char *calling_file, int elevel, + int *num_filenames, char **err_msg) +{ + char *directory; + DIR *d; + struct dirent *de; + char **filenames; + int size_filenames; + + /* + * Reject directory name that is all-blank (including empty), as that + * leads to confusion --- we'd read the containing directory, typically + * resulting in recursive inclusion of the same file(s). + */ + if (strspn(includedir, " \t\r\n") == strlen(includedir)) + { + ereport(elevel, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("empty configuration directory name: \"%s\"", + includedir))); + *err_msg = "empty configuration directory name"; + return NULL; + } + + directory = AbsoluteConfigLocation(includedir, calling_file); + d = AllocateDir(directory); + if (d == NULL) + { + ereport(elevel, + (errcode_for_file_access(), + errmsg("could not open configuration directory \"%s\": %m", + directory))); + *err_msg = psprintf("could not open directory \"%s\"", directory); + filenames = NULL; + goto cleanup; + } + + /* + * Read the directory and put the filenames in an array, so we can sort + * them prior to caller processing the contents. + */ + size_filenames = 32; + filenames = (char **) palloc(size_filenames * sizeof(char *)); + *num_filenames = 0; + + while ((de = ReadDir(d, directory)) != NULL) + { + PGFileType de_type; + char filename[MAXPGPATH]; + + /* + * Only parse files with names ending in ".conf". Explicitly reject + * files starting with ".". This excludes things like "." and "..", + * as well as typical hidden files, backup files, and editor debris. 
+ */ + if (strlen(de->d_name) < 6) + continue; + if (de->d_name[0] == '.') + continue; + if (strcmp(de->d_name + strlen(de->d_name) - 5, ".conf") != 0) + continue; + + join_path_components(filename, directory, de->d_name); + canonicalize_path(filename); + de_type = get_dirent_type(filename, de, true, elevel); + if (de_type == PGFILETYPE_ERROR) + { + *err_msg = psprintf("could not stat file \"%s\"", filename); + pfree(filenames); + filenames = NULL; + goto cleanup; + } + else if (de_type != PGFILETYPE_DIR) + { + /* Add file to array, increasing its size in blocks of 32 */ + if (*num_filenames >= size_filenames) + { + size_filenames += 32; + filenames = (char **) repalloc(filenames, + size_filenames * sizeof(char *)); + } + filenames[*num_filenames] = pstrdup(filename); + (*num_filenames)++; + } + } + + if (*num_filenames > 0) + qsort(filenames, *num_filenames, sizeof(char *), pg_qsort_strcmp); + +cleanup: + if (d) + FreeDir(d); + pfree(directory); + return filenames; +} + /* * Read and parse a single configuration file. This function recurses * to handle "include" directives. @@ -606,127 +710,30 @@ ParseConfigDirectory(const char *includedir, ConfigVariable **head_p, ConfigVariable **tail_p) { - char *directory; - DIR *d; - struct dirent *de; + char *err_msg; char **filenames; int num_filenames; - int size_filenames; - bool status; - - /* - * Reject directory name that is all-blank (including empty), as that - * leads to confusion --- we'd read the containing directory, typically - * resulting in recursive inclusion of the same file(s). - */ - if (strspn(includedir, " \t\r\n") == strlen(includedir)) - { - ereport(elevel, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("empty configuration directory name: \"%s\"", - includedir))); - record_config_file_error("empty configuration directory name", - calling_file, calling_lineno, - head_p, tail_p); - return false; - } - - /* - * We don't check for recursion or too-deep nesting depth here; the - * subsequent calls to ParseConfigFile will take care of that. - */ - - directory = AbsoluteConfigLocation(includedir, calling_file); - d = AllocateDir(directory); - if (d == NULL) - { - ereport(elevel, - (errcode_for_file_access(), - errmsg("could not open configuration directory \"%s\": %m", - directory))); - record_config_file_error(psprintf("could not open directory \"%s\"", - directory), - calling_file, calling_lineno, - head_p, tail_p); - status = false; - goto cleanup; - } - /* - * Read the directory and put the filenames in an array, so we can sort - * them prior to processing the contents. - */ - size_filenames = 32; - filenames = (char **) palloc(size_filenames * sizeof(char *)); - num_filenames = 0; + filenames = GetDirConfFiles(includedir, calling_file, elevel, + &num_filenames, &err_msg); - while ((de = ReadDir(d, directory)) != NULL) + if (!filenames) { - PGFileType de_type; - char filename[MAXPGPATH]; - - /* - * Only parse files with names ending in ".conf". Explicitly reject - * files starting with ".". This excludes things like "." and "..", - * as well as typical hidden files, backup files, and editor debris. 
- */ - if (strlen(de->d_name) < 6) - continue; - if (de->d_name[0] == '.') - continue; - if (strcmp(de->d_name + strlen(de->d_name) - 5, ".conf") != 0) - continue; - - join_path_components(filename, directory, de->d_name); - canonicalize_path(filename); - de_type = get_dirent_type(filename, de, true, elevel); - if (de_type == PGFILETYPE_ERROR) - { - record_config_file_error(psprintf("could not stat file \"%s\"", - filename), - calling_file, calling_lineno, - head_p, tail_p); - status = false; - goto cleanup; - } - else if (de_type != PGFILETYPE_DIR) - { - /* Add file to array, increasing its size in blocks of 32 */ - if (num_filenames >= size_filenames) - { - size_filenames += 32; - filenames = (char **) repalloc(filenames, - size_filenames * sizeof(char *)); - } - filenames[num_filenames] = pstrdup(filename); - num_filenames++; - } + record_config_file_error(err_msg, calling_file, calling_lineno, head_p, + tail_p); + return false; } - if (num_filenames > 0) + for (int i = 0; i < num_filenames; i++) { - int i; - - qsort(filenames, num_filenames, sizeof(char *), pg_qsort_strcmp); - for (i = 0; i < num_filenames; i++) - { - if (!ParseConfigFile(filenames[i], true, - calling_file, calling_lineno, - depth, elevel, - head_p, tail_p)) - { - status = false; - goto cleanup; - } - } + if (!ParseConfigFile(filenames[i], true, + calling_file, calling_lineno, + depth, elevel, + head_p, tail_p)) + return false; } - status = true; -cleanup: - if (d) - FreeDir(d); - pfree(directory); - return status; + return true; } /* diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 4f5d05d0ce..2ad06c4d3e 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -6135,16 +6135,16 @@ { oid => '3401', descr => 'show pg_hba.conf rules', proname => 'pg_hba_file_rules', prorows => '1000', proretset => 't', provolatile => 'v', prorettype => 'record', proargtypes => '', - proallargtypes => '{int4,int4,text,_text,_text,text,text,text,_text,text}', - proargmodes => '{o,o,o,o,o,o,o,o,o,o}', - proargnames => '{rule_number,line_number,type,database,user_name,address,netmask,auth_method,options,error}', + proallargtypes => '{int4,text,int4,text,_text,_text,text,text,text,_text,text}', + proargmodes => '{o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{rule_number,file_name,line_number,type,database,user_name,address,netmask,auth_method,options,error}', prosrc => 'pg_hba_file_rules' }, { oid => '6250', descr => 'show pg_ident.conf mappings', proname => 'pg_ident_file_mappings', prorows => '1000', proretset => 't', provolatile => 'v', prorettype => 'record', proargtypes => '', - proallargtypes => '{int4,int4,text,text,text,text}', - proargmodes => '{o,o,o,o,o,o}', - proargnames => '{mapping_number,line_number,map_name,sys_name,pg_username,error}', + proallargtypes => '{int4,text,int4,text,text,text,text}', + proargmodes => '{o,o,o,o,o,o,o}', + proargnames => '{mapping_number,file_name,line_number,map_name,sys_name,pg_username,error}', prosrc => 'pg_ident_file_mappings' }, { oid => '1371', descr => 'view system lock information', proname => 'pg_lock_status', prorows => '1000', proretset => 't', diff --git a/src/include/libpq/hba.h b/src/include/libpq/hba.h index bf896ac084..7108cd2dae 100644 --- a/src/include/libpq/hba.h +++ b/src/include/libpq/hba.h @@ -179,6 +179,7 @@ extern HbaLine *parse_hba_line(TokenizedAuthLine *tok_line, int elevel); extern IdentLine *parse_ident_line(TokenizedAuthLine *tok_line, int elevel); extern bool pg_isblank(const char c); extern MemoryContext 
tokenize_auth_file(const char *filename, FILE *file, - List **tok_lines, int elevel); + List **tok_lines, int depth, + int elevel); #endif /* HBA_H */ diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h index b3aaff9665..59ca39d908 100644 --- a/src/include/utils/guc.h +++ b/src/include/utils/guc.h @@ -144,6 +144,8 @@ typedef struct ConfigVariable struct ConfigVariable *next; } ConfigVariable; +extern char **GetDirConfFiles(const char *includedir, const char *calling_file, + int elevel, int *num_filenames, char **err_msg); extern bool ParseConfigFile(const char *config_file, bool strict, const char *calling_file, int calling_lineno, int depth, int elevel, diff --git a/src/test/authentication/t/003_file_inclusion.pl b/src/test/authentication/t/003_file_inclusion.pl new file mode 100644 index 0000000000..8eae72b8d4 --- /dev/null +++ b/src/test/authentication/t/003_file_inclusion.pl @@ -0,0 +1,657 @@ + +# Copyright (c) 2021-2022, PostgreSQL Global Development Group + +# Set of tests for authentication and pg_hba.conf inclusion. +# This test can only run with Unix-domain sockets. + +use strict; +use warnings; +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More; +use Time::HiRes qw(usleep); +use IPC::Run qw(pump finish timer); +use Data::Dumper; + +if (!$use_unix_sockets) +{ + plan skip_all => + "authentication tests cannot run without Unix-domain sockets"; +} + +# stores the current line counter for each file. hba_rule and ident_rule are +# fake file names used for the global rule number for each auth view. +my %cur_line = ('hba_rule' => 1, 'ident_rule' => 1); + +my $hba_file = 'subdir1/pg_hba_custom.conf'; +my $ident_file = 'subdir2/pg_ident_custom.conf'; + +# Initialize primary node +my $node = PostgreSQL::Test::Cluster->new('primary'); +$node->init; +$node->start; + +my $data_dir = $node->data_dir; + +# Normalize the data directory for Windows +$data_dir =~ s/\/\.\//\//g; # reduce /./ to / +$data_dir =~ s/\/\//\//g; # reduce // to / +$data_dir =~ s/\/$//; # remove trailing / + + +# Add the given payload to the given relative HBA file of the given node. +# This function maintains the %cur_line metadata, so it has to be called in the +# expected inclusion evaluation order in order to keep it in sync. +# +# If the payload starts with "include" or "ignore", the function doesn't +# increase the general hba rule number. +# +# If an err_str is provided, it returns an arrayref containing the provided +# filename, the current line number in that file and the provided err_str. The +# err_str has to be a valid regex string. +# Otherwise it only returns the line number of the payload in the wanted file. +# This function has to be called in the expected inclusion evaluation order to +# keep the %cur_line information in sync. +sub add_hba_line +{ + my $node = shift; + my $filename = shift; + my $payload = shift; + my $err_str = shift; + my $globline; + my $fileline; + my @tokens; + my $line; + + # Append the payload to the given file + $node->append_conf($filename, $payload); + + # Get the current %cur_line counter for the file + if (not defined $cur_line{$filename}) + { + $cur_line{$filename} = 1; + } + $fileline = $cur_line{$filename}++; + + # Include directive, don't generate an underlying pg_hba_file_rules line + # but make sure we incremented the %cur_line counter. 
+ # Also ignore line beginning with "ignore", for content of files that + # should not being included + if ($payload =~ qr/^(include|ignore)/) + { + if (defined $err_str) + { + return [$filename, $fileline, $err_str]; + } + else + { + return $fileline; + } + } + + # Get (and increment) the global rule number + $globline = $cur_line{'hba_rule'}++; + + # If caller provided an err_str, just returns the needed metadata + if (defined $err_str) + { + return [$filename, $fileline, $err_str]; + } + + # Otherwise, generate the expected pg_hba_file_rules line + @tokens = split(/ /, $payload); + $tokens[1] = '{' . $tokens[1] . '}'; # database + $tokens[2] = '{' . $tokens[2] . '}'; # user_name + + # add empty address and netmask betweed user_name and auth_method + splice @tokens, 3, 0, ''; + splice @tokens, 3, 0, ''; + + # append empty options and error + push @tokens, ''; + push @tokens, ''; + + # generate the expected final line + $line = ""; + $line .= "\n" if ($globline > 1); + $line .= "$globline|$data_dir/$filename|$fileline|"; + $line .= join('|', @tokens); + + return $line; +} + +# Add the given payload to the given relative ident file of the given node. +# Same as add_hba_line but for pg_ident files +sub add_ident_line +{ + my $node = shift; + my $filename = shift; + my $payload = shift; + my $err_str = shift; + my $globline; + my $fileline; + my @tokens; + my $line; + + # Append the payload to the given file + $node->append_conf($filename, $payload); + + # Get the current %cur_line counter for the file + if (not defined $cur_line{$filename}) + { + $cur_line{$filename} = 1; + } + $fileline = $cur_line{$filename}++; + + # Include directive, don't generate an underlying pg_hba_file_rules line + # but make sure we incremented the %cur_line counter. + # Also ignore line beginning with "ignore", for content of files that + # should not being included + if ($payload =~ qr/^(include|ignore)/) + { + if (defined $err_str) + { + return [$filename, $fileline, $err_str]; + } + else + { + return $fileline; + } + } + + # Get (and increment) the global rule number + $globline = $cur_line{'ident_rule'}++; + + # If caller provided an err_str, just returns the needed metadata + if (defined $err_str) + { + return [$filename, $fileline, $err_str]; + } + + # Otherwise, generate the expected pg_ident_file_mappings line + @tokens = split(/ /, $payload); + + # append empty error + push @tokens, ''; + + # generate the expected final line + $line = ""; + $line .= "\n" if ($globline > 1); + $line .= "$globline|$data_dir/$filename|$fileline|"; + $line .= join('|', @tokens); + + return $line; +} + +# Delete pg_hba.conf from the given node, add various entries to test the +# include infrastructure and then execute a reload to refresh it. 
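# (Illustrative example, not from the original test, using hypothetical
# values: a payload of "local db1 alice trust" appended as the third global
# rule at line 5 of the custom HBA file makes add_hba_line return an expected
# pg_hba_file_rules row of the form
#   3|<data_dir>/subdir1/pg_hba_custom.conf|5|local|{db1}|{alice}|||trust||
# which the tests below compare against "SELECT * FROM pg_hba_file_rules".)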
+sub generate_valid_auth_files +{ + my $node = shift; + my $hba_expected = ''; + my $ident_expected = ''; + + # customise main auth file names + $node->safe_psql('postgres', "ALTER SYSTEM SET hba_file = '$data_dir/$hba_file'"); + $node->safe_psql('postgres', "ALTER SYSTEM SET ident_file = '$data_dir/$ident_file'"); + + # and make original ones invalid to be sure they're not used anywhere + $node->append_conf('pg_hba.conf', "some invalid line"); + $node->append_conf('pg_ident.conf', "some invalid line"); + + # pg_hba stuff + mkdir("$data_dir/subdir1"); + mkdir("$data_dir/hba_inc"); + mkdir("$data_dir/hba_inc_if"); + mkdir("$data_dir/hba_pos"); + + # Make sure we will still be able to connect + $hba_expected .= add_hba_line($node, "$hba_file", 'local all all trust'); + + # Add include data + add_hba_line($node, "$hba_file", "include ../pg_hba_pre.conf"); + $hba_expected .= add_hba_line($node, 'pg_hba_pre.conf', "local pre all reject"); + + $hba_expected .= add_hba_line($node, "$hba_file", "local all all reject"); + + add_hba_line($node, "$hba_file", "include ../hba_pos/pg_hba_pos.conf"); + $hba_expected .= add_hba_line($node, 'hba_pos/pg_hba_pos.conf', "local pos all reject"); + # include is relative to current path + add_hba_line($node, 'hba_pos/pg_hba_pos.conf', "include pg_hba_pos2.conf"); + $hba_expected .= add_hba_line($node, 'hba_pos/pg_hba_pos2.conf', "local pos2 all reject"); + + # include_if_exists data + add_hba_line($node, "$hba_file", "include_if_exists ../hba_inc_if/none"); + add_hba_line($node, "$hba_file", "include_if_exists ../hba_inc_if/some"); + $hba_expected .= add_hba_line($node, 'hba_inc_if/some', "local if_some all reject"); + + # include_dir data + add_hba_line($node, "$hba_file", "include_dir ../hba_inc"); + add_hba_line($node, 'hba_inc/garbageconf', "ignore - should not be included"); + $hba_expected .= add_hba_line($node, 'hba_inc/01_z.conf', "local dir_z all reject"); + $hba_expected .= add_hba_line($node, 'hba_inc/02_a.conf', "local dir_a all reject"); + + # secondary auth file + add_hba_line($node, $hba_file, 'local @../dbnames.conf all reject'); + $node->append_conf('dbnames.conf', "db1"); + $node->append_conf('dbnames.conf', "db3"); + $hba_expected .= "\n" . ($cur_line{'hba_rule'} - 1) + . "|$data_dir/$hba_file|" . ($cur_line{$hba_file} - 1) + . 
'|local|{db1,db3}|{all}|||reject||'; + + # pg_ident stuff + mkdir("$data_dir/subdir2"); + mkdir("$data_dir/ident_inc"); + mkdir("$data_dir/ident_inc_if"); + mkdir("$data_dir/ident_pos"); + + # Add include data + add_ident_line($node, "$ident_file", "include ../pg_ident_pre.conf"); + $ident_expected .= add_ident_line($node, 'pg_ident_pre.conf', "pre foo bar"); + + $ident_expected .= add_ident_line($node, "$ident_file", "test a b"); + + add_ident_line($node, "$ident_file", "include ../ident_pos/pg_ident_pos.conf"); + $ident_expected .= add_ident_line($node, 'ident_pos/pg_ident_pos.conf', "pos foo bar"); + # include is relative to current path + add_ident_line($node, 'ident_pos/pg_ident_pos.conf', "include pg_ident_pos2.conf"); + $ident_expected .= add_ident_line($node, 'ident_pos/pg_ident_pos2.conf', "pos2 foo bar"); + + # include_if_exists data + add_ident_line($node, "$ident_file", "include_if_exists ../ident_inc_if/none"); + add_ident_line($node, "$ident_file", "include_if_exists ../ident_inc_if/some"); + $ident_expected .= add_ident_line($node, 'ident_inc_if/some', "if_some foo bar"); + + # include_dir data + add_ident_line($node, "$ident_file", "include_dir ../ident_inc"); + add_ident_line($node, 'ident_inc/garbageconf', "ignore - should not be included"); + $ident_expected .= add_ident_line($node, 'ident_inc/01_z.conf', "dir_z foo bar"); + $ident_expected .= add_ident_line($node, 'ident_inc/02_a.conf', "dir_a foo bar"); + + $node->restart; + $node->connect_ok('dbname=postgres', + 'Connection ok after generating valid auth files'); + + return ($hba_expected, $ident_expected); +} + +# Delete pg_hba.conf and pg_ident.conf from the given node and add minimal +# entries to allow authentication. +sub reset_auth_files +{ + my $node = shift; + + unlink("$data_dir/$hba_file"); + unlink("$data_dir/$ident_file"); + + %cur_line = ('hba_rule' => 1, 'ident_rule' => 1); + + return add_hba_line($node, "$hba_file", 'local all all trust'); +} + +# Generate a list of expected error regex for the given array of error +# conditions, as generated by add_hba_line/add_ident_line with an err_str. +# +# 2 regex are generated per array entry: one for the given err_str, and one for +# the expected line in the specific file. Since all lines are independant, +# there's no guarantee that a specific failure regex and the per-line regex +# will match the same error. Calling code should add at least one test with a +# single error to make sure that the line number / file name is correct. +# +# On top of that, an extra line is generated for the general failure to process +# the main auth file. +sub generate_log_err_patterns +{ + my $node = shift; + my $raw_errors = shift; + my $is_hba_err = shift; + my @errors; + + foreach my $arr (@{$raw_errors}) + { + my $filename = @{$arr}[0]; + my $fileline = @{$arr}[1]; + my $err_str = @{$arr}[2]; + + push @errors, qr/$err_str/; + + # Context messages with the file / line location aren't always emitted + if ($err_str !~ /maximum nesting depth exceeded/ and + $err_str !~ /could not open secondary authentication file/) + { + push @errors, qr/line $fileline of configuration file "$data_dir\/$filename"/ + } + } + + push @errors, qr/could not load $data_dir\/$hba_file/ if ($is_hba_err); + + return \@errors; +} + +# Generate the expected output for the auth file view error reporting (file +# name, file line, error), for the given array of error conditions, as +# generated generated by add_hba_line/add_ident_line with an err_str. 
+sub generate_log_err_rows +{ + my $node = shift; + my $raw_errors = shift; + my $exp_rows = ''; + + foreach my $arr (@{$raw_errors}) + { + my $filename = @{$arr}[0]; + my $fileline = @{$arr}[1]; + my $err_str = @{$arr}[2]; + + $exp_rows .= "\n" if ($exp_rows ne ""); + + # Unescape regex patterns if any + $err_str =~ s/\\([\(\)])/$1/g; + $exp_rows .= "|$data_dir\/$filename|$fileline|$err_str" + } + + return $exp_rows; +} + +# Reset the main auth files, append the given payload to the given config file, +# and check that the instance cannot start, raising the expected error line(s). +sub start_errors_like +{ + my $node = shift; + my $file = shift; + my $payload = shift; + my $pattern = shift; + my $should_fail = shift; + + reset_auth_files($node); + $node->append_conf($file, $payload); + + unlink($node->logfile); + my $ret = + PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', $data_dir, + '-l', $node->logfile, 'start'); + + if ($should_fail) + { + ok($ret != 0, "Cannot start postgres with faulty $file"); + } + else + { + ok($ret == 0, "postgres can start with faulty $file"); + } + + my $log_contents = slurp_file($node->logfile); + + foreach (@{$pattern}) + { + like($log_contents, + $_, + "Expected failure found in the logs"); + } + + if (not $should_fail) + { + # We can't simply call $node->stop here as the call is optimized out + # when the server isn't started with $node->start. + my $ret = + PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', + $data_dir, 'stop', '-m', 'fast'); + ok($ret == 0, "Could stop postgres"); + } +} + +# We should be able to connect, and see an empty pg_ident.conf +is($node->psql( + 'postgres', 'SELECT count(*) FROM pg_ident_file_mappings'), + qq(0), + 'pg_ident.conf is empty'); + +############################################ +# part 1, test view reporting for valid data +############################################ +my ($exp_hba, $exp_ident) = generate_valid_auth_files($node); + +$node->connect_ok('dbname=postgres', 'Connection still ok'); + +is($node->safe_psql( + 'postgres', 'SELECT * FROM pg_hba_file_rules'), + qq($exp_hba), + 'pg_hba_file_rules content is expected'); + +is($node->safe_psql( + 'postgres', 'SELECT * FROM pg_ident_file_mappings'), + qq($exp_ident), + 'pg_ident_file_mappings content is expected'); + +############################################# +# part 2, test log reporting for invalid data +############################################# +reset_auth_files($node); +$node->restart('fast'); +$node->connect_ok('dbname=postgres', + 'Connection ok after resetting auth files'); + +$node->stop('fast'); + +start_errors_like($node, $hba_file, "include ../not_a_file", + [ + qr/could not open included authentication file "\.\.\/not_a_file" as "$data_dir\/not_a_file": No such file or directory/, + qr/could not load $data_dir\/$hba_file/ + ], 1); + +# include_dir, single included file +mkdir("$data_dir/hba_inc_fail"); +add_hba_line($node, "hba_inc_fail/inc_dir.conf", "local all all reject"); +add_hba_line($node, "hba_inc_fail/inc_dir.conf", "local all all reject"); +add_hba_line($node, "hba_inc_fail/inc_dir.conf", "local all all reject"); +add_hba_line($node, "hba_inc_fail/inc_dir.conf", "not_a_token"); +start_errors_like($node, $hba_file, "include_dir ../hba_inc_fail", + [ + qr/invalid connection type "not_a_token"/, + qr/line 4 of configuration file "$data_dir\/hba_inc_fail\/inc_dir\.conf"/, + qr/could not load $data_dir\/$hba_file/ + ], 1); + +# include_dir, single included file with nested inclusion +unlink("$data_dir/hba_inc_fail/inc_dir.conf"); +my 
@hba_raw_errors_step1; + +add_hba_line($node, "hba_inc_fail/inc_dir.conf", "include file1"); + +add_hba_line($node, "hba_inc_fail/file1", "include file2"); +add_hba_line($node, "hba_inc_fail/file2", "local all all reject"); +add_hba_line($node, "hba_inc_fail/file2", "include file3"); + +add_hba_line($node, "hba_inc_fail/file3", "local all all reject"); +add_hba_line($node, "hba_inc_fail/file3", "local all all reject"); +push @hba_raw_errors_step1, add_hba_line($node, "hba_inc_fail/file3", + "local all all zuul", + 'invalid authentication method "zuul"'); + +start_errors_like( + $node, $hba_file, "include_dir ../hba_inc_fail", + generate_log_err_patterns($node, \@hba_raw_errors_step1, 1), 1); + +# start_errors_like will reset the main auth files, so the previous error won't +# occur again. We keep it around as we will put back both bogus inclusions for +# the tests at step 3. +my @hba_raw_errors_step2; + +# include_if_exists, with various problems +push @hba_raw_errors_step2, add_hba_line($node, "hba_if_exists.conf", + "local", + "end-of-line before database specification"); +push @hba_raw_errors_step2, add_hba_line($node, "hba_if_exists.conf", + "local,host", + "multiple values specified for connection type"); +push @hba_raw_errors_step2, add_hba_line($node, "hba_if_exists.conf", + "local all", + "end-of-line before role specification"); +push @hba_raw_errors_step2, add_hba_line($node, "hba_if_exists.conf", + "local all all", + "end-of-line before authentication method"); +push @hba_raw_errors_step2, add_hba_line($node, "hba_if_exists.conf", + "host all all test/42", + 'specifying both host name and CIDR mask is invalid: "test/42"'); +push @hba_raw_errors_step2, add_hba_line($node, "hba_if_exists.conf", + 'local @dbnames_fails.conf all reject', + "could not open secondary authentication file \"\@dbnames_fails.conf\" as \"$data_dir/dbnames_fails.conf\": No such file or directory"); + +add_hba_line($node, "hba_if_exists.conf", "include recurse.conf"); +push @hba_raw_errors_step2, add_hba_line($node, "recurse.conf", + "include recurse.conf", + 'could not open configuration file "recurse.conf": maximum nesting depth exceeded'); + +# Generate the regex for the expected errors in the logs. There's no guarantee +# that the generated "line X of file..." will be emitted for the expected line, +# but previous tests already ensured that the correct line number / file name +# was emitted, so ensuring that there's an error in all expected lines is +# enough here. +my $expected_errors = generate_log_err_patterns($node, \@hba_raw_errors_step2, + 1); + +# Not an error, but it should raise a message in the logs. 
Manually add an +# extra log message to detect +add_hba_line($node, "hba_if_exists.conf", "include_if_exists if_exists_none"); +push @{$expected_errors}, + qr/skipping missing authentication file "$data_dir\/if_exists_none"/; + +start_errors_like( + $node, $hba_file, "include_if_exists ../hba_if_exists.conf", + $expected_errors, 1); + +# Mostly the same, but for ident files +reset_auth_files($node); + +my @ident_raw_errors_step1; + +# include_dir, single included file with nested inclusion +mkdir("$data_dir/ident_inc_fail"); +add_ident_line($node, "ident_inc_fail/inc_dir.conf", "include file1"); + +add_ident_line($node, "ident_inc_fail/file1", "include file2"); +add_ident_line($node, "ident_inc_fail/file2", "ok ok ok"); +add_ident_line($node, "ident_inc_fail/file2", "include file3"); + +add_ident_line($node, "ident_inc_fail/file3", "ok ok ok"); +add_ident_line($node, "ident_inc_fail/file3", "ok ok ok"); +push @ident_raw_errors_step1, add_ident_line($node, "ident_inc_fail/file3", + "failmap /(fail postgres", + 'invalid regular expression "\(fail": parentheses \(\) not balanced'); + +start_errors_like( + $node, $ident_file, "include_dir ../ident_inc_fail", + generate_log_err_patterns($node, \@ident_raw_errors_step1, 0), + 0); + +# start_errors_like will reset the main auth files, so the previous error won't +# occur again. We keep it around as we will put back both bogus inclusions for +# the tests at step 3. +my @ident_raw_errors_step2; + +# include_if_exists, with various problems +push @ident_raw_errors_step2, add_ident_line($node, "ident_if_exists.conf", "map", + "missing entry at end of line"); +push @ident_raw_errors_step2, add_ident_line($node, "ident_if_exists.conf", "map1,map2", + "multiple values in ident field"); +push @ident_raw_errors_step2, add_ident_line($node, "ident_if_exists.conf", + 'map @osnames_fails.conf postgres', + "could not open secondary authentication file \"\@osnames_fails.conf\" as \"$data_dir/osnames_fails.conf\": No such file or directory"); + +add_ident_line($node, "ident_if_exists.conf", "include ident_recurse.conf"); +push @ident_raw_errors_step2, add_ident_line($node, "ident_recurse.conf", "include ident_recurse.conf", + 'could not open configuration file "ident_recurse.conf": maximum nesting depth exceeded'); + +start_errors_like( + $node, $ident_file, "include_if_exists ../ident_if_exists.conf", + # There's no guarantee that the generated "line X of file..." will be + # emitted for the expected line, but previous tests already ensured that + # the correct line number / file name was emitted, so ensuring that there's + # an error in all expected lines is enough here. + generate_log_err_patterns($node, \@ident_raw_errors_step2, 0), + 0); + +##################################################### +# part 3, test reporting of various error scenario +# NOTE: this will be bypassed -DEXEC_BACKEND or win32 +##################################################### +reset_auth_files($node); + +$node->start; +$node->connect_ok('dbname=postgres', 'Can connect after an auth file reset'); + +is($node->safe_psql( + 'postgres', + 'SELECT count(*) FROM pg_hba_file_rules WHERE error IS NOT NULL'), + qq(0), + 'No error expected in pg_hba_file_rules'); + +add_ident_line($node, $ident_file, ''); +is($node->safe_psql( + 'postgres', + 'SELECT count(*) FROM pg_ident_file_mappings WHERE error IS NOT NULL'), + qq(0), + 'No error expected in pg_ident_file_mappings'); + +# The instance could be restarted and no error is detected. 
Now check if the +# build is compatible with the view error reporting (EXEC_BACKEND / win32 will +# fail when trying to connect as they always rely on the current auth files +# content) +my @hba_raw_errors; + +push @hba_raw_errors, add_hba_line($node, $hba_file, "include ../not_a_file", + "could not open included authentication file \"../not_a_file\" as \"$data_dir/not_a_file\": No such file or directory"); + +my ($stdout, $stderr); +my $cmdret = $node->psql('postgres', 'SELECT 1', + stdout => \$stdout, stderr => \$stderr); + +if ($cmdret != 0) +{ + # Connection failed. Bail out, but make sure to raise a failure if it + # didn't fail for the expected hba file modification. + like($stderr, + qr/connection to server.* failed: FATAL: could not load $data_dir\/$hba_file/, + "Connection failed due to loading an invalid hba file"); + + done_testing(); + diag("Build not compatible with auth file view error reporting, bail out.\n"); + exit; +} + +# Combine errors generated at step 2, in the same order. +$node->append_conf($hba_file, "include_dir ../hba_inc_fail"); +push @hba_raw_errors, @hba_raw_errors_step1; + +$node->append_conf($hba_file, "include_if_exists ../hba_if_exists.conf"); +push @hba_raw_errors, @hba_raw_errors_step2; + +my $hba_expected = generate_log_err_rows($node, \@hba_raw_errors); +is($node->safe_psql( + 'postgres', + 'SELECT rule_number, file_name, line_number, error FROM pg_hba_file_rules' + . ' WHERE error IS NOT NULL ORDER BY rule_number'), + qq($hba_expected), + 'Detected all error in hba file'); + +# and do the same for pg_ident +my @ident_raw_errors; + +push @ident_raw_errors, add_ident_line($node, $ident_file, "include ../not_a_file", + "could not open included authentication file \"../not_a_file\" as \"$data_dir/not_a_file\": No such file or directory"); + +$node->append_conf($ident_file, "include_dir ../ident_inc_fail"); +push @ident_raw_errors, @ident_raw_errors_step1; + +$node->append_conf($ident_file, "include_if_exists ../ident_if_exists.conf"); +push @ident_raw_errors, @ident_raw_errors_step2; + +my $ident_expected = generate_log_err_rows($node, \@ident_raw_errors); +is($node->safe_psql( + 'postgres', + 'SELECT mapping_number, file_name, line_number, error FROM pg_ident_file_mappings' + . 
' WHERE error IS NOT NULL ORDER BY mapping_number'), + qq($ident_expected), + 'Detected all error in ident file'); + +done_testing(); diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 178e536e21..3d8f182674 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -1338,6 +1338,7 @@ pg_group| SELECT pg_authid.rolname AS groname, FROM pg_authid WHERE (NOT pg_authid.rolcanlogin); pg_hba_file_rules| SELECT a.rule_number, + a.file_name, a.line_number, a.type, a.database, @@ -1347,14 +1348,15 @@ pg_hba_file_rules| SELECT a.rule_number, a.auth_method, a.options, a.error - FROM pg_hba_file_rules() a(rule_number, line_number, type, database, user_name, address, netmask, auth_method, options, error); + FROM pg_hba_file_rules() a(rule_number, file_name, line_number, type, database, user_name, address, netmask, auth_method, options, error); pg_ident_file_mappings| SELECT a.mapping_number, + a.file_name, a.line_number, a.map_name, a.sys_name, a.pg_username, a.error - FROM pg_ident_file_mappings() a(mapping_number, line_number, map_name, sys_name, pg_username, error); + FROM pg_ident_file_mappings() a(mapping_number, file_name, line_number, map_name, sys_name, pg_username, error); pg_indexes| SELECT n.nspname AS schemaname, c.relname AS tablename, i.relname AS indexname, -- 2.37.0
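As a usage sketch of the new view columns added by this patch (the query shapes follow the pg_proc.dat definitions above; any output would be installation-specific), the views now report which file each entry came from:

    -- which file and line each HBA rule comes from, and whether it parsed
    SELECT rule_number, file_name, line_number, type, database, error
    FROM pg_hba_file_rules;

    -- same idea for user name maps
    SELECT mapping_number, file_name, line_number, map_name, error
    FROM pg_ident_file_mappings;

Rules pulled in through include, include_if_exists or include_dir report the path of the file that actually contains them, while rule_number and mapping_number keep counting across all processed files in inclusion order.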