Add Unicode property tables.

Provide functions to test for Unicode properties, such as Alphabetic
or Cased. These functions use tables derived from Unicode data files,
similar to the tables for Unicode normalization or general category,
and those tables can be updated with the 'update-unicode' build
target.

Use Unicode properties to provide functions to test for regex
character classes, like 'punct' or 'alnum'.

This is infrastructure in preparation for a builtin collation provider,
and it may also be useful for other callers.

Discussion: https://postgr.es/m/ff4c2f2f9c8fc7ca27c1c24ae37ecaeaeaff6b53.camel%40j-davis.com
Reviewed-by: Daniel Verite, Peter Eisentraut, Jeremy Schneider
Jeff Davis 2024-03-06 12:50:01 -08:00
parent 2ed8f9a01e
commit ad49994538
8 changed files with 4607 additions and 105 deletions
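For orientation (an illustration added here, not part of the commit's diff): a caller in the PostgreSQL tree might exercise the new property and character-class functions roughly as sketched below. The helper name and the printf-based reporting are hypothetical; the sketch assumes frontend code built against the common library, like the category_test program changed in this commit.

#include "postgres_fe.h"

#include <stdio.h>

#include "common/unicode_category.h"

/* Hypothetical helper: report a few properties and regex-style classes. */
static void
report_codepoint(pg_wchar code)
{
	printf("U+%06X: Alphabetic=%d Cased=%d White_Space=%d\n",
		   code,
		   pg_u_prop_alphabetic(code),
		   pg_u_prop_cased(code),
		   pg_u_prop_white_space(code));

	/* passing false selects the "Standard" (non-POSIX) variant */
	printf("U+%06X: alnum=%d punct=%d\n",
		   code,
		   pg_u_isalnum(code, false),
		   pg_u_ispunct(code, false));
}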


@ -29,13 +29,13 @@ update-unicode: unicode_category_table.h unicode_east_asian_fw_table.h unicode_n
# These files are part of the Unicode Character Database. Download
# them on demand. The dependency on Makefile.global is for
# UNICODE_VERSION.
CompositionExclusions.txt DerivedNormalizationProps.txt EastAsianWidth.txt NormalizationTest.txt UnicodeData.txt: $(top_builddir)/src/Makefile.global
CompositionExclusions.txt DerivedCoreProperties.txt DerivedNormalizationProps.txt EastAsianWidth.txt NormalizationTest.txt PropList.txt UnicodeData.txt: $(top_builddir)/src/Makefile.global
$(DOWNLOAD) https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/$(@F)
unicode_version.h: generate-unicode_version.pl
$(PERL) $< --version $(UNICODE_VERSION)
unicode_category_table.h: generate-unicode_category_table.pl UnicodeData.txt
unicode_category_table.h: generate-unicode_category_table.pl DerivedCoreProperties.txt PropList.txt UnicodeData.txt
$(PERL) $<
# Generation of conversion tables used for string normalization with
@ -82,4 +82,4 @@ clean:
rm -f $(OBJS) category_test category_test.o norm_test norm_test.o
distclean: clean
rm -f CompositionExclusions.txt DerivedNormalizationProps.txt EastAsianWidth.txt NormalizationTest.txt UnicodeData.txt norm_test_table.h unicode_category_table.h unicode_norm_table.h
rm -f CompositionExclusions.txt DerivedCoreProperties.txt DerivedNormalizationProps.txt EastAsianWidth.txt NormalizationTest.txt PropList.txt UnicodeData.txt norm_test_table.h unicode_category_table.h unicode_norm_table.h


@ -1,22 +1,35 @@
This directory contains tools to generate the tables in
src/include/common/unicode_norm.h, used for Unicode normalization. The
generated .h file is included in the source tree, so these are normally not
needed to build PostgreSQL, only if you need to re-generate the .h file
from the Unicode data files for some reason, e.g. to update to a new version
of Unicode.
This directory contains tools to download new Unicode data files and
generate static tables. These tables are used to normalize or
determine various properties of Unicode data.
Generating unicode_norm_table.h
-------------------------------
The generated header files are copied to src/include/common/, and
included in the source tree, so these tools are not normally required
to build PostgreSQL.
Run
Update Unicode Version
----------------------
Edit src/Makefile.global.in and src/common/unicode/meson.build
to update the UNICODE_VERSION.
Then, generate the new header files with:
make update-unicode
from the top level of the source tree and commit the result.
or if using meson:
ninja update-unicode
from the top level of the source tree. Examine the result to make sure
the changes look reasonable (that is, that the diff size and scope are
comparable to the Unicode changes since the last update), and then
commit it.
Tests
-----
Normalization tests:
The Unicode consortium publishes a comprehensive test suite for the
normalization algorithm, in a file called NormalizationTest.txt. This
directory also contains a perl script and some C code, to run our
@ -26,3 +39,15 @@ To download NormalizationTest.txt and run the tests:
make normalization-check
This is also run as part of the update-unicode target.
Category & Property tests:
The file category_test.c exhaustively compares the category and
properties of each code point as determined by the generated tables
with the category and properties as reported by ICU. For this test to
be effective, the version of the Unicode data files must be similar to
the version of Unicode on which ICU is based, so attempt to match the
versions as closely as possible. If the versions are mismatched, the test
will skip over codepoints that are assigned in one version but not the
other, and it may falsely report failures. This test is run as part of the
update-unicode target.


@ -1,6 +1,6 @@
/*-------------------------------------------------------------------------
* category_test.c
* Program to test Unicode general category functions.
* Program to test Unicode general category and character properties.
*
* Portions Copyright (c) 2017-2024, PostgreSQL Global Development Group
*
@ -14,17 +14,23 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <wctype.h>
#ifdef USE_ICU
#include <unicode/uchar.h>
#endif
#include "common/unicode_category.h"
#include "common/unicode_version.h"
static int pg_unicode_version = 0;
#ifdef USE_ICU
static int icu_unicode_version = 0;
#endif
/*
* Parse version into integer for easy comparison.
*/
#ifdef USE_ICU
static int
parse_unicode_version(const char *version)
{
@ -39,57 +45,175 @@ parse_unicode_version(const char *version)
return major * 100 + minor;
}
#endif
/*
* Exhaustively test that the Unicode category for each codepoint matches that
* returned by ICU.
*/
int
main(int argc, char **argv)
{
#ifdef USE_ICU
int pg_unicode_version = parse_unicode_version(PG_UNICODE_VERSION);
int icu_unicode_version = parse_unicode_version(U_UNICODE_VERSION);
/*
* Test Postgres Unicode tables by comparing with ICU. Test the General
* Category, as well as the properties Alphabetic, Lowercase, Uppercase,
* Cased, Case_Ignorable, White_Space, Hex_Digit, and Join_Control, along
* with the regex character classes derived from them.
*/
static void
icu_test()
{
int successful = 0;
int pg_skipped_codepoints = 0;
int icu_skipped_codepoints = 0;
printf("category_test: Postgres Unicode version:\t%s\n", PG_UNICODE_VERSION);
printf("category_test: ICU Unicode version:\t\t%s\n", U_UNICODE_VERSION);
for (UChar32 code = 0; code <= 0x10ffff; code++)
for (pg_wchar code = 0; code <= 0x10ffff; code++)
{
uint8_t pg_category = unicode_category(code);
uint8_t icu_category = u_charType(code);
/* Property tests */
bool prop_alphabetic = pg_u_prop_alphabetic(code);
bool prop_lowercase = pg_u_prop_lowercase(code);
bool prop_uppercase = pg_u_prop_uppercase(code);
bool prop_cased = pg_u_prop_cased(code);
bool prop_case_ignorable = pg_u_prop_case_ignorable(code);
bool prop_white_space = pg_u_prop_white_space(code);
bool prop_hex_digit = pg_u_prop_hex_digit(code);
bool prop_join_control = pg_u_prop_join_control(code);
bool icu_prop_alphabetic = u_hasBinaryProperty(
code, UCHAR_ALPHABETIC);
bool icu_prop_lowercase = u_hasBinaryProperty(
code, UCHAR_LOWERCASE);
bool icu_prop_uppercase = u_hasBinaryProperty(
code, UCHAR_UPPERCASE);
bool icu_prop_cased = u_hasBinaryProperty(
code, UCHAR_CASED);
bool icu_prop_case_ignorable = u_hasBinaryProperty(
code, UCHAR_CASE_IGNORABLE);
bool icu_prop_white_space = u_hasBinaryProperty(
code, UCHAR_WHITE_SPACE);
bool icu_prop_hex_digit = u_hasBinaryProperty(
code, UCHAR_HEX_DIGIT);
bool icu_prop_join_control = u_hasBinaryProperty(
code, UCHAR_JOIN_CONTROL);
/*
* Compare with ICU for character classes using:
*
* https://unicode-org.github.io/icu-docs/apidoc/dev/icu4c/uchar_8h.html#details
*
* which describes how to use ICU to test for membership in regex
* character classes.
*
* NB: the document suggests testing for some properties such as
* UCHAR_POSIX_ALNUM, but that doesn't mean that we're testing for the
* "POSIX Compatible" character classes.
*/
bool isalpha = pg_u_isalpha(code);
bool islower = pg_u_islower(code);
bool isupper = pg_u_isupper(code);
bool ispunct = pg_u_ispunct(code, false);
bool isdigit = pg_u_isdigit(code, false);
bool isxdigit = pg_u_isxdigit(code, false);
bool isalnum = pg_u_isalnum(code, false);
bool isspace = pg_u_isspace(code);
bool isblank = pg_u_isblank(code);
bool iscntrl = pg_u_iscntrl(code);
bool isgraph = pg_u_isgraph(code);
bool isprint = pg_u_isprint(code);
bool icu_isalpha = u_isUAlphabetic(code);
bool icu_islower = u_isULowercase(code);
bool icu_isupper = u_isUUppercase(code);
bool icu_ispunct = u_ispunct(code);
bool icu_isdigit = u_isdigit(code);
bool icu_isxdigit = u_hasBinaryProperty(code,
UCHAR_POSIX_XDIGIT);
bool icu_isalnum = u_hasBinaryProperty(code,
UCHAR_POSIX_ALNUM);
bool icu_isspace = u_isUWhiteSpace(code);
bool icu_isblank = u_isblank(code);
bool icu_iscntrl = icu_category == PG_U_CONTROL;
bool icu_isgraph = u_hasBinaryProperty(code,
UCHAR_POSIX_GRAPH);
bool icu_isprint = u_hasBinaryProperty(code,
UCHAR_POSIX_PRINT);
/*
* A version mismatch means that some assigned codepoints in the newer
* version may be unassigned in the older version. That's OK, though
* the test will not cover those codepoints marked unassigned in the
* older version (that is, it will no longer be an exhaustive test).
*/
if (pg_category == PG_U_UNASSIGNED &&
icu_category != PG_U_UNASSIGNED &&
pg_unicode_version < icu_unicode_version)
{
pg_skipped_codepoints++;
continue;
}
if (icu_category == PG_U_UNASSIGNED &&
pg_category != PG_U_UNASSIGNED &&
icu_unicode_version < pg_unicode_version)
{
icu_skipped_codepoints++;
continue;
}
if (pg_category != icu_category)
{
/*
* A version mismatch means that some assigned codepoints in the
* newer version may be unassigned in the older version. That's
* OK, though the test will not cover those codepoints marked
* unassigned in the older version (that is, it will no longer be
* an exhaustive test).
*/
if (pg_category == PG_U_UNASSIGNED &&
pg_unicode_version < icu_unicode_version)
pg_skipped_codepoints++;
else if (icu_category == PG_U_UNASSIGNED &&
icu_unicode_version < pg_unicode_version)
icu_skipped_codepoints++;
else
{
printf("category_test: FAILURE for codepoint 0x%06x\n", code);
printf("category_test: Postgres category: %02d %s %s\n", pg_category,
unicode_category_abbrev(pg_category),
unicode_category_string(pg_category));
printf("category_test: ICU category: %02d %s %s\n", icu_category,
unicode_category_abbrev(icu_category),
unicode_category_string(icu_category));
printf("\n");
exit(1);
}
printf("category_test: FAILURE for codepoint 0x%06x\n", code);
printf("category_test: Postgres category: %02d %s %s\n", pg_category,
unicode_category_abbrev(pg_category),
unicode_category_string(pg_category));
printf("category_test: ICU category: %02d %s %s\n", icu_category,
unicode_category_abbrev(icu_category),
unicode_category_string(icu_category));
printf("\n");
exit(1);
}
if (prop_alphabetic != icu_prop_alphabetic ||
prop_lowercase != icu_prop_lowercase ||
prop_uppercase != icu_prop_uppercase ||
prop_cased != icu_prop_cased ||
prop_case_ignorable != icu_prop_case_ignorable ||
prop_white_space != icu_prop_white_space ||
prop_hex_digit != icu_prop_hex_digit ||
prop_join_control != icu_prop_join_control)
{
printf("category_test: FAILURE for codepoint 0x%06x\n", code);
printf("category_test: Postgres property alphabetic/lowercase/uppercase/cased/case_ignorable/white_space/hex_digit/join_control: %d/%d/%d/%d/%d/%d/%d/%d\n",
prop_alphabetic, prop_lowercase, prop_uppercase,
prop_cased, prop_case_ignorable,
prop_white_space, prop_hex_digit, prop_join_control);
printf("category_test: ICU property alphabetic/lowercase/uppercase/cased/case_ignorable/white_space/hex_digit/join_control: %d/%d/%d/%d/%d/%d/%d/%d\n",
icu_prop_alphabetic, icu_prop_lowercase, icu_prop_uppercase,
icu_prop_cased, icu_prop_case_ignorable,
icu_prop_white_space, icu_prop_hex_digit, icu_prop_join_control);
printf("\n");
exit(1);
}
if (isalpha != icu_isalpha ||
islower != icu_islower ||
isupper != icu_isupper ||
ispunct != icu_ispunct ||
isdigit != icu_isdigit ||
isxdigit != icu_isxdigit ||
isalnum != icu_isalnum ||
isspace != icu_isspace ||
isblank != icu_isblank ||
iscntrl != icu_iscntrl ||
isgraph != icu_isgraph ||
isprint != icu_isprint)
{
printf("category_test: FAILURE for codepoint 0x%06x\n", code);
printf("category_test: Postgres class alpha/lower/upper/punct/digit/xdigit/alnum/space/blank/cntrl/graph/print: %d/%d/%d/%d/%d/%d/%d/%d/%d/%d/%d/%d\n",
isalpha, islower, isupper, ispunct, isdigit, isxdigit, isalnum, isspace, isblank, iscntrl, isgraph, isprint);
printf("category_test: ICU class alpha/lower/upper/punct/digit/xdigit/alnum/space/blank/cntrl/graph/print: %d/%d/%d/%d/%d/%d/%d/%d/%d/%d/%d/%d\n",
icu_isalpha, icu_islower, icu_isupper, icu_ispunct, icu_isdigit, icu_isxdigit, icu_isalnum, icu_isspace, icu_isblank, icu_iscntrl, icu_isgraph, icu_isprint);
printf("\n");
exit(1);
}
if (pg_category != PG_U_UNASSIGNED)
successful++;
}
if (pg_skipped_codepoints > 0)
@ -99,10 +223,22 @@ main(int argc, char **argv)
printf("category_test: skipped %d codepoints unassigned in ICU due to Unicode version mismatch\n",
icu_skipped_codepoints);
printf("category_test: success\n");
exit(0);
printf("category_test: ICU test: %d codepoints successful\n", successful);
}
#endif
int
main(int argc, char **argv)
{
pg_unicode_version = parse_unicode_version(PG_UNICODE_VERSION);
printf("category_test: Postgres Unicode version:\t%s\n", PG_UNICODE_VERSION);
#ifdef USE_ICU
icu_unicode_version = parse_unicode_version(U_UNICODE_VERSION);
printf("category_test: ICU Unicode version:\t\t%s\n", U_UNICODE_VERSION);
icu_test();
#else
printf("category_test: ICU support required for test; skipping\n");
exit(0);
printf("category_test: ICU not available; skipping\n");
#endif
}


@ -25,6 +25,10 @@ my $output_table_file = "$output_path/unicode_category_table.h";
my $FH;
# create a table of all codepoints < 0x80 and their associated
# categories and properties for fast lookups
my %opt_ascii = ();
# Read entries from UnicodeData.txt into a list of codepoint ranges
# and their general category.
my @category_ranges = ();
@ -48,21 +52,42 @@ while (my $line = <$FH>)
my $category = $elts[2];
die "codepoint out of range" if $code > 0x10FFFF;
die "unassigned codepoint in UnicodeData.txt" if $category eq $CATEGORY_UNASSIGNED;
die "unassigned codepoint in UnicodeData.txt"
if $category eq $CATEGORY_UNASSIGNED;
if (!defined($range_start)) {
if ($code < 0x80)
{
my @properties = ();
# No ASCII characters have category Titlecase_Letter,
# but include here for completeness.
push @properties, "PG_U_PROP_CASED" if ($category eq 'Lt');
$opt_ascii{$code} = {
Category => $category,
Properties => \@properties
};
}
if (!defined($range_start))
{
my $code_str = sprintf "0x%06x", $code;
die if defined($range_end) || defined($range_category) || defined($gap_category);
die
if defined($range_end)
|| defined($range_category)
|| defined($gap_category);
die "unexpected first entry <..., Last>" if ($name =~ /Last>/);
die "expected 0x000000 for first entry, got $code_str" if $code != 0x000000;
die "expected 0x000000 for first entry, got $code_str"
if $code != 0x000000;
# initialize
$range_start = $code;
$range_end = $code;
$range_category = $category;
if ($name =~ /<.*, First>$/) {
if ($name =~ /<.*, First>$/)
{
$gap_category = $category;
} else {
}
else
{
$gap_category = $CATEGORY_UNASSIGNED;
}
next;
@ -71,10 +96,17 @@ while (my $line = <$FH>)
# Gap in codepoints detected. If it's a different category than
# the current range, emit the current range and initialize a new
# range representing the gap.
if ($range_end + 1 != $code && $range_category ne $gap_category) {
if ($range_category ne $CATEGORY_UNASSIGNED) {
push(@category_ranges, {start => $range_start, end => $range_end,
category => $range_category});
if ($range_end + 1 != $code && $range_category ne $gap_category)
{
if ($range_category ne $CATEGORY_UNASSIGNED)
{
push(
@category_ranges,
{
start => $range_start,
end => $range_end,
category => $range_category
});
}
$range_start = $range_end + 1;
$range_end = $code - 1;
@ -82,27 +114,39 @@ while (my $line = <$FH>)
}
# different category; new range
if ($range_category ne $category) {
if ($range_category ne $CATEGORY_UNASSIGNED) {
push(@category_ranges, {start => $range_start, end => $range_end,
category => $range_category});
if ($range_category ne $category)
{
if ($range_category ne $CATEGORY_UNASSIGNED)
{
push(
@category_ranges,
{
start => $range_start,
end => $range_end,
category => $range_category
});
}
$range_start = $code;
$range_end = $code;
$range_category = $category;
}
if ($name =~ /<.*, First>$/) {
die "<..., First> entry unexpectedly follows another <..., First> entry"
if ($name =~ /<.*, First>$/)
{
die
"<..., First> entry unexpectedly follows another <..., First> entry"
if $gap_category ne $CATEGORY_UNASSIGNED;
$gap_category = $category;
}
elsif ($name =~ /<.*, Last>$/) {
die "<..., First> and <..., Last> entries have mismatching general category"
elsif ($name =~ /<.*, Last>$/)
{
die
"<..., First> and <..., Last> entries have mismatching general category"
if $gap_category ne $category;
$gap_category = $CATEGORY_UNASSIGNED;
}
else {
else
{
die "unexpected entry found between <..., First> and <..., Last>"
if $gap_category ne $CATEGORY_UNASSIGNED;
}
@ -115,13 +159,17 @@ die "<..., First> entry with no corresponding <..., Last> entry"
if $gap_category ne $CATEGORY_UNASSIGNED;
# emit final range
if ($range_category ne $CATEGORY_UNASSIGNED) {
push(@category_ranges, {start => $range_start, end => $range_end,
category => $range_category});
if ($range_category ne $CATEGORY_UNASSIGNED)
{
push(
@category_ranges,
{
start => $range_start,
end => $range_end,
category => $range_category
});
}
my $num_ranges = scalar @category_ranges;
# See: https://www.unicode.org/reports/tr44/#General_Category_Values
my $categories = {
Cn => 'PG_U_UNASSIGNED',
@ -156,11 +204,146 @@ my $categories = {
Pf => 'PG_U_FINAL_PUNCTUATION'
};
# Start writing out the output files
# Find White_Space and Hex_Digit characters
my @white_space = ();
my @hex_digits = ();
my @join_control = ();
open($FH, '<', "$output_path/PropList.txt")
or die "Could not open $output_path/PropList.txt: $!.";
while (my $line = <$FH>)
{
my $pattern = qr/([0-9A-F\.]+)\s*;\s*(\w+)\s*#.*/s;
next unless $line =~ $pattern;
my $code = $line =~ s/$pattern/$1/rg;
my $property = $line =~ s/$pattern/$2/rg;
my $start;
my $end;
if ($code =~ /\.\./)
{
# code range
my @sp = split /\.\./, $code;
$start = hex($sp[0]);
$end = hex($sp[1]);
}
else
{
# single code point
$start = hex($code);
$end = hex($code);
}
if ($property eq "White_Space")
{
push @white_space, { start => $start, end => $end };
for (my $i = $start; $i <= $end && $i < 0x80; $i++)
{
push @{ $opt_ascii{$i}{Properties} }, "PG_U_PROP_WHITE_SPACE";
}
}
elsif ($property eq "Hex_Digit")
{
push @hex_digits, { start => $start, end => $end };
for (my $i = $start; $i <= $end && $i < 0x80; $i++)
{
push @{ $opt_ascii{$i}{Properties} }, "PG_U_PROP_HEX_DIGIT";
}
}
elsif ($property eq "Join_Control")
{
push @join_control, { start => $start, end => $end };
for (my $i = $start; $i <= $end && $i < 0x80; $i++)
{
push @{ $opt_ascii{$i}{Properties} }, "PG_U_PROP_JOIN_CONTROL";
}
}
}
# Find Alphabetic, Lowercase, and Uppercase characters
my @alphabetic = ();
my @lowercase = ();
my @uppercase = ();
my @case_ignorable = ();
open($FH, '<', "$output_path/DerivedCoreProperties.txt")
or die "Could not open $output_path/DerivedCoreProperties.txt: $!.";
while (my $line = <$FH>)
{
my $pattern = qr/^([0-9A-F\.]+)\s*;\s*(\w+)\s*#.*$/s;
next unless $line =~ $pattern;
my $code = $line =~ s/$pattern/$1/rg;
my $property = $line =~ s/$pattern/$2/rg;
my $start;
my $end;
if ($code =~ /\.\./)
{
# code range
my @sp = split /\.\./, $code;
die "line: {$line} code: {$code} sp[0] {$sp[0]} sp[1] {$sp[1]}"
unless $sp[0] =~ /^[0-9A-F]+$/ && $sp[1] =~ /^[0-9A-F]+$/;
$start = hex($sp[0]);
$end = hex($sp[1]);
}
else
{
die "line: {$line} code: {$code}" unless $code =~ /^[0-9A-F]+$/;
# single code point
$start = hex($code);
$end = hex($code);
}
if ($property eq "Alphabetic")
{
push @alphabetic, { start => $start, end => $end };
for (my $i = $start; $i <= $end && $i < 0x80; $i++)
{
push @{ $opt_ascii{$i}{Properties} }, "PG_U_PROP_ALPHABETIC";
}
}
elsif ($property eq "Lowercase")
{
push @lowercase, { start => $start, end => $end };
for (my $i = $start; $i <= $end && $i < 0x80; $i++)
{
push @{ $opt_ascii{$i}{Properties} }, "PG_U_PROP_LOWERCASE";
push @{ $opt_ascii{$i}{Properties} }, "PG_U_PROP_CASED";
}
}
elsif ($property eq "Uppercase")
{
push @uppercase, { start => $start, end => $end };
for (my $i = $start; $i <= $end && $i < 0x80; $i++)
{
push @{ $opt_ascii{$i}{Properties} }, "PG_U_PROP_UPPERCASE";
push @{ $opt_ascii{$i}{Properties} }, "PG_U_PROP_CASED";
}
}
elsif ($property eq "Case_Ignorable")
{
push @case_ignorable, { start => $start, end => $end };
for (my $i = $start; $i <= $end && $i < 0x80; $i++)
{
push @{ $opt_ascii{$i}{Properties} }, "PG_U_PROP_CASE_IGNORABLE";
}
}
}
my $num_category_ranges = scalar @category_ranges;
my $num_alphabetic_ranges = scalar @alphabetic;
my $num_lowercase_ranges = scalar @lowercase;
my $num_uppercase_ranges = scalar @uppercase;
my $num_case_ignorable_ranges = scalar @case_ignorable;
my $num_white_space_ranges = scalar @white_space;
my $num_hex_digit_ranges = scalar @hex_digits;
my $num_join_control_ranges = scalar @join_control;
# Start writing out the output file
open my $OT, '>', $output_table_file
or die "Could not open output file $output_table_file: $!\n";
print $OT <<HEADER;
print $OT <<"EOS";
/*-------------------------------------------------------------------------
*
* unicode_category_table.h
@ -188,18 +371,153 @@ typedef struct
uint8 category; /* General Category */
} pg_category_range;
/* table of Unicode codepoint ranges and their categories */
static const pg_category_range unicode_categories[$num_ranges] =
typedef struct
{
HEADER
uint32 first; /* Unicode codepoint */
uint32 last; /* Unicode codepoint */
} pg_unicode_range;
my $firsttime = 1;
foreach my $range (@category_ranges) {
printf $OT ",\n" unless $firsttime;
$firsttime = 0;
typedef struct
{
uint8 category;
uint8 properties;
} pg_unicode_properties;
my $category = $categories->{$range->{category}};
die "category missing: $range->{category}" unless $category;
printf $OT "\t{0x%06x, 0x%06x, %s}", $range->{start}, $range->{end}, $category;
/*
* The properties currently used, in no particular order. Fits in a uint8, but
* if more properties are added, a wider integer will be needed.
*/
#define PG_U_PROP_ALPHABETIC (1 << 0)
#define PG_U_PROP_LOWERCASE (1 << 1)
#define PG_U_PROP_UPPERCASE (1 << 2)
#define PG_U_PROP_CASED (1 << 3)
#define PG_U_PROP_CASE_IGNORABLE (1 << 4)
#define PG_U_PROP_WHITE_SPACE (1 << 5)
#define PG_U_PROP_JOIN_CONTROL (1 << 6)
#define PG_U_PROP_HEX_DIGIT (1 << 7)
EOS
print $OT <<"EOS";
/* table for fast lookup of ASCII codepoints */
static const pg_unicode_properties unicode_opt_ascii[128] =
{
EOS
for (my $i = 0; $i < 128; $i++)
{
my $category_str = $categories->{ $opt_ascii{$i}->{Category} };
my $props_str = (join ' | ', @{ $opt_ascii{$i}{Properties} }) || "0";
printf $OT
"\t{\n\t\t/* 0x%06x */\n\t\t.category = %s,\n\t\t.properties = %s\n\t},\n",
$i, $category_str, $props_str;
}
print $OT "\n};\n";
print $OT "};\n\n";
print $OT <<"EOS";
/* table of Unicode codepoint ranges and their categories */
static const pg_category_range unicode_categories[$num_category_ranges] =
{
EOS
foreach my $range (@category_ranges)
{
my $category = $categories->{ $range->{category} };
die "category missing: $range->{category}" unless $category;
printf $OT "\t{0x%06x, 0x%06x, %s},\n", $range->{start}, $range->{end},
$category;
}
print $OT "};\n\n";
print $OT <<"EOS";
/* table of Unicode codepoint ranges of Alphabetic characters */
static const pg_unicode_range unicode_alphabetic[$num_alphabetic_ranges] =
{
EOS
foreach my $range (@alphabetic)
{
printf $OT "\t{0x%06x, 0x%06x},\n", $range->{start}, $range->{end};
}
print $OT "};\n\n";
print $OT <<"EOS";
/* table of Unicode codepoint ranges of Lowercase characters */
static const pg_unicode_range unicode_lowercase[$num_lowercase_ranges] =
{
EOS
foreach my $range (@lowercase)
{
printf $OT "\t{0x%06x, 0x%06x},\n", $range->{start}, $range->{end};
}
print $OT "};\n\n";
print $OT <<"EOS";
/* table of Unicode codepoint ranges of Uppercase characters */
static const pg_unicode_range unicode_uppercase[$num_uppercase_ranges] =
{
EOS
foreach my $range (@uppercase)
{
printf $OT "\t{0x%06x, 0x%06x},\n", $range->{start}, $range->{end};
}
print $OT "};\n\n";
print $OT <<"EOS";
/* table of Unicode codepoint ranges of Case_Ignorable characters */
static const pg_unicode_range unicode_case_ignorable[$num_case_ignorable_ranges] =
{
EOS
foreach my $range (@case_ignorable)
{
printf $OT "\t{0x%06x, 0x%06x},\n", $range->{start}, $range->{end};
}
print $OT "};\n\n";
print $OT <<"EOS";
/* table of Unicode codepoint ranges of White_Space characters */
static const pg_unicode_range unicode_white_space[$num_white_space_ranges] =
{
EOS
foreach my $range (@white_space)
{
printf $OT "\t{0x%06x, 0x%06x},\n", $range->{start}, $range->{end};
}
print $OT "};\n\n";
print $OT <<"EOS";
/* table of Unicode codepoint ranges of Hex_Digit characters */
static const pg_unicode_range unicode_hex_digit[$num_hex_digit_ranges] =
{
EOS
foreach my $range (@hex_digits)
{
printf $OT "\t{0x%06x, 0x%06x},\n", $range->{start}, $range->{end};
}
print $OT "};\n\n";
print $OT <<"EOS";
/* table of Unicode codepoint ranges of Join_Control characters */
static const pg_unicode_range unicode_join_control[$num_join_control_ranges] =
{
EOS
foreach my $range (@join_control)
{
printf $OT "\t{0x%06x, 0x%06x},\n", $range->{start}, $range->{end};
}
print $OT "};\n";


@ -11,7 +11,7 @@ endif
# These files are part of the Unicode Character Database. Download them on
# demand.
foreach f : ['CompositionExclusions.txt', 'DerivedNormalizationProps.txt', 'EastAsianWidth.txt', 'NormalizationTest.txt', 'UnicodeData.txt']
foreach f : ['CompositionExclusions.txt', 'DerivedCoreProperties.txt', 'DerivedNormalizationProps.txt', 'EastAsianWidth.txt', 'NormalizationTest.txt', 'PropList.txt', 'UnicodeData.txt']
url = unicode_baseurl.format(UNICODE_VERSION, f)
target = custom_target(f,
output: f,
@ -26,7 +26,7 @@ update_unicode_targets = []
update_unicode_targets += \
custom_target('unicode_category_table.h',
input: [unicode_data['UnicodeData.txt']],
input: [unicode_data['UnicodeData.txt'], unicode_data['DerivedCoreProperties.txt'], unicode_data['PropList.txt']],
output: ['unicode_category_table.h'],
command: [
perl, files('generate-unicode_category_table.pl'),


@ -1,6 +1,8 @@
/*-------------------------------------------------------------------------
* unicode_category.c
* Determine general category of Unicode characters.
* Determine general category and character properties of Unicode
* characters. Encoding must be UTF8, where we assume that the pg_wchar
* representation is a code point.
*
* Portions Copyright (c) 2017-2024, PostgreSQL Global Development Group
*
@ -18,24 +20,85 @@
#include "common/unicode_category.h"
#include "common/unicode_category_table.h"
/*
* Create bitmasks from pg_unicode_category values for efficient comparison of
* multiple categories. For instance, PG_U_MN_MASK is a bitmask representing
the general category Mn; and PG_U_M_MASK represents general categories Mn,
* Me, and Mc.
*
* The number of Unicode General Categories should never grow, so a 32-bit
* mask is fine.
*/
#define PG_U_CATEGORY_MASK(X) ((uint32)(1 << (X)))
#define PG_U_LU_MASK PG_U_CATEGORY_MASK(PG_U_UPPERCASE_LETTER)
#define PG_U_LL_MASK PG_U_CATEGORY_MASK(PG_U_LOWERCASE_LETTER)
#define PG_U_LT_MASK PG_U_CATEGORY_MASK(PG_U_TITLECASE_LETTER)
#define PG_U_LC_MASK (PG_U_LU_MASK|PG_U_LL_MASK|PG_U_LT_MASK)
#define PG_U_LM_MASK PG_U_CATEGORY_MASK(PG_U_MODIFIER_LETTER)
#define PG_U_LO_MASK PG_U_CATEGORY_MASK(PG_U_OTHER_LETTER)
#define PG_U_L_MASK (PG_U_LU_MASK|PG_U_LL_MASK|PG_U_LT_MASK|PG_U_LM_MASK|\
PG_U_LO_MASK)
#define PG_U_MN_MASK PG_U_CATEGORY_MASK(PG_U_NONSPACING_MARK)
#define PG_U_ME_MASK PG_U_CATEGORY_MASK(PG_U_ENCLOSING_MARK)
#define PG_U_MC_MASK PG_U_CATEGORY_MASK(PG_U_SPACING_MARK)
#define PG_U_M_MASK (PG_U_MN_MASK|PG_U_MC_MASK|PG_U_ME_MASK)
#define PG_U_ND_MASK PG_U_CATEGORY_MASK(PG_U_DECIMAL_NUMBER)
#define PG_U_NL_MASK PG_U_CATEGORY_MASK(PG_U_LETTER_NUMBER)
#define PG_U_NO_MASK PG_U_CATEGORY_MASK(PG_U_OTHER_NUMBER)
#define PG_U_N_MASK (PG_U_ND_MASK|PG_U_NL_MASK|PG_U_NO_MASK)
#define PG_U_PC_MASK PG_U_CATEGORY_MASK(PG_U_CONNECTOR_PUNCTUATION)
#define PG_U_PD_MASK PG_U_CATEGORY_MASK(PG_U_DASH_PUNCTUATION)
#define PG_U_PS_MASK PG_U_CATEGORY_MASK(PG_U_OPEN_PUNCTUATION)
#define PG_U_PE_MASK PG_U_CATEGORY_MASK(PG_U_CLOSE_PUNCTUATION)
#define PG_U_PI_MASK PG_U_CATEGORY_MASK(PG_U_INITIAL_PUNCTUATION)
#define PG_U_PF_MASK PG_U_CATEGORY_MASK(PG_U_FINAL_PUNCTUATION)
#define PG_U_PO_MASK PG_U_CATEGORY_MASK(PG_U_OTHER_PUNCTUATION)
#define PG_U_P_MASK (PG_U_PC_MASK|PG_U_PD_MASK|PG_U_PS_MASK|PG_U_PE_MASK|\
PG_U_PI_MASK|PG_U_PF_MASK|PG_U_PO_MASK)
#define PG_U_SM_MASK PG_U_CATEGORY_MASK(PG_U_MATH_SYMBOL)
#define PG_U_SC_MASK PG_U_CATEGORY_MASK(PG_U_CURRENCY_SYMBOL)
#define PG_U_SK_MASK PG_U_CATEGORY_MASK(PG_U_MODIFIER_SYMBOL)
#define PG_U_SO_MASK PG_U_CATEGORY_MASK(PG_U_OTHER_SYMBOL)
#define PG_U_S_MASK (PG_U_SM_MASK|PG_U_SC_MASK|PG_U_SK_MASK|PG_U_SO_MASK)
#define PG_U_ZS_MASK PG_U_CATEGORY_MASK(PG_U_SPACE_SEPARATOR)
#define PG_U_ZL_MASK PG_U_CATEGORY_MASK(PG_U_LINE_SEPARATOR)
#define PG_U_ZP_MASK PG_U_CATEGORY_MASK(PG_U_PARAGRAPH_SEPARATOR)
#define PG_U_Z_MASK (PG_U_ZS_MASK|PG_U_ZL_MASK|PG_U_ZP_MASK)
#define PG_U_CC_MASK PG_U_CATEGORY_MASK(PG_U_CONTROL)
#define PG_U_CF_MASK PG_U_CATEGORY_MASK(PG_U_FORMAT)
#define PG_U_CS_MASK PG_U_CATEGORY_MASK(PG_U_SURROGATE)
#define PG_U_CO_MASK PG_U_CATEGORY_MASK(PG_U_PRIVATE_USE)
#define PG_U_CN_MASK PG_U_CATEGORY_MASK(PG_U_UNASSIGNED)
#define PG_U_C_MASK (PG_U_CC_MASK|PG_U_CF_MASK|PG_U_CS_MASK|PG_U_CO_MASK|\
PG_U_CN_MASK)
#define PG_U_CHARACTER_TAB 0x09
static bool range_search(const pg_unicode_range * tbl, size_t size,
pg_wchar code);
/*
* Unicode general category for the given codepoint.
*/
pg_unicode_category
unicode_category(pg_wchar ucs)
unicode_category(pg_wchar code)
{
int min = 0;
int mid;
int max = lengthof(unicode_categories) - 1;
Assert(ucs <= 0x10ffff);
Assert(code <= 0x10ffff);
if (code < 0x80)
return unicode_opt_ascii[code].category;
while (max >= min)
{
mid = (min + max) / 2;
if (ucs > unicode_categories[mid].last)
if (code > unicode_categories[mid].last)
min = mid + 1;
else if (ucs < unicode_categories[mid].first)
else if (code < unicode_categories[mid].first)
max = mid - 1;
else
return unicode_categories[mid].category;
@ -44,6 +107,224 @@ unicode_category(pg_wchar ucs)
return PG_U_UNASSIGNED;
}
bool
pg_u_prop_alphabetic(pg_wchar code)
{
if (code < 0x80)
return unicode_opt_ascii[code].properties & PG_U_PROP_ALPHABETIC;
return range_search(unicode_alphabetic,
lengthof(unicode_alphabetic),
code);
}
bool
pg_u_prop_lowercase(pg_wchar code)
{
if (code < 0x80)
return unicode_opt_ascii[code].properties & PG_U_PROP_LOWERCASE;
return range_search(unicode_lowercase,
lengthof(unicode_lowercase),
code);
}
bool
pg_u_prop_uppercase(pg_wchar code)
{
if (code < 0x80)
return unicode_opt_ascii[code].properties & PG_U_PROP_UPPERCASE;
return range_search(unicode_uppercase,
lengthof(unicode_uppercase),
code);
}
bool
pg_u_prop_cased(pg_wchar code)
{
uint32 category_mask;
if (code < 0x80)
return unicode_opt_ascii[code].properties & PG_U_PROP_CASED;
category_mask = PG_U_CATEGORY_MASK(unicode_category(code));
return category_mask & PG_U_LT_MASK ||
pg_u_prop_lowercase(code) ||
pg_u_prop_uppercase(code);
}
bool
pg_u_prop_case_ignorable(pg_wchar code)
{
if (code < 0x80)
return unicode_opt_ascii[code].properties & PG_U_PROP_CASE_IGNORABLE;
return range_search(unicode_case_ignorable,
lengthof(unicode_case_ignorable),
code);
}
bool
pg_u_prop_white_space(pg_wchar code)
{
if (code < 0x80)
return unicode_opt_ascii[code].properties & PG_U_PROP_WHITE_SPACE;
return range_search(unicode_white_space,
lengthof(unicode_white_space),
code);
}
bool
pg_u_prop_hex_digit(pg_wchar code)
{
if (code < 0x80)
return unicode_opt_ascii[code].properties & PG_U_PROP_HEX_DIGIT;
return range_search(unicode_hex_digit,
lengthof(unicode_hex_digit),
code);
}
bool
pg_u_prop_join_control(pg_wchar code)
{
if (code < 0x80)
return unicode_opt_ascii[code].properties & PG_U_PROP_JOIN_CONTROL;
return range_search(unicode_join_control,
lengthof(unicode_join_control),
code);
}
/*
* The following functions implement the Compatibility Properties described
* at: http://www.unicode.org/reports/tr18/#Compatibility_Properties
*
* If 'posix' is true, implements the "POSIX Compatible" variant, otherwise
* the "Standard" variant.
*/
bool
pg_u_isdigit(pg_wchar code, bool posix)
{
if (posix)
return ('0' <= code && code <= '9');
else
return unicode_category(code) == PG_U_DECIMAL_NUMBER;
}
bool
pg_u_isalpha(pg_wchar code)
{
return pg_u_prop_alphabetic(code);
}
bool
pg_u_isalnum(pg_wchar code, bool posix)
{
return pg_u_isalpha(code) || pg_u_isdigit(code, posix);
}
bool
pg_u_isword(pg_wchar code)
{
uint32 category_mask = PG_U_CATEGORY_MASK(unicode_category(code));
return
category_mask & (PG_U_M_MASK | PG_U_ND_MASK | PG_U_PC_MASK) ||
pg_u_isalpha(code) ||
pg_u_prop_join_control(code);
}
bool
pg_u_isupper(pg_wchar code)
{
return pg_u_prop_uppercase(code);
}
bool
pg_u_islower(pg_wchar code)
{
return pg_u_prop_lowercase(code);
}
bool
pg_u_isblank(pg_wchar code)
{
return code == PG_U_CHARACTER_TAB ||
unicode_category(code) == PG_U_SPACE_SEPARATOR;
}
bool
pg_u_iscntrl(pg_wchar code)
{
return unicode_category(code) == PG_U_CONTROL;
}
bool
pg_u_isgraph(pg_wchar code)
{
uint32 category_mask = PG_U_CATEGORY_MASK(unicode_category(code));
if (category_mask & (PG_U_CC_MASK | PG_U_CS_MASK | PG_U_CN_MASK) ||
pg_u_isspace(code))
return false;
return true;
}
bool
pg_u_isprint(pg_wchar code)
{
pg_unicode_category category = unicode_category(code);
if (category == PG_U_CONTROL)
return false;
return pg_u_isgraph(code) || pg_u_isblank(code);
}
bool
pg_u_ispunct(pg_wchar code, bool posix)
{
uint32 category_mask;
if (posix)
{
if (pg_u_isalpha(code))
return false;
category_mask = PG_U_CATEGORY_MASK(unicode_category(code));
return category_mask & (PG_U_P_MASK | PG_U_S_MASK);
}
else
{
category_mask = PG_U_CATEGORY_MASK(unicode_category(code));
return category_mask & PG_U_P_MASK;
}
}
bool
pg_u_isspace(pg_wchar code)
{
return pg_u_prop_white_space(code);
}
bool
pg_u_isxdigit(pg_wchar code, bool posix)
{
if (posix)
return (('0' <= code && code <= '9') ||
('A' <= code && code <= 'F') ||
('a' <= code && code <= 'f'));
else
return unicode_category(code) == PG_U_DECIMAL_NUMBER ||
pg_u_prop_hex_digit(code);
}
/*
* Description of Unicode general category.
*/
@ -191,3 +472,30 @@ unicode_category_abbrev(pg_unicode_category category)
Assert(false);
return "??"; /* keep compiler quiet */
}
/*
* Binary search to test if given codepoint exists in one of the ranges in the
* given table.
*/
static bool
range_search(const pg_unicode_range * tbl, size_t size, pg_wchar code)
{
int min = 0;
int mid;
int max = size - 1;
Assert(code <= 0x10ffff);
while (max >= min)
{
mid = (min + max) / 2;
if (code > tbl[mid].last)
min = mid + 1;
else if (code < tbl[mid].first)
max = mid - 1;
else
return true;
}
return false;
}


@ -62,7 +62,30 @@ typedef enum pg_unicode_category
} pg_unicode_category;
extern pg_unicode_category unicode_category(pg_wchar ucs);
const char *unicode_category_string(pg_unicode_category category);
const char *unicode_category_abbrev(pg_unicode_category category);
extern const char *unicode_category_string(pg_unicode_category category);
extern const char *unicode_category_abbrev(pg_unicode_category category);
extern bool pg_u_prop_alphabetic(pg_wchar c);
extern bool pg_u_prop_lowercase(pg_wchar c);
extern bool pg_u_prop_uppercase(pg_wchar c);
extern bool pg_u_prop_cased(pg_wchar c);
extern bool pg_u_prop_case_ignorable(pg_wchar c);
extern bool pg_u_prop_white_space(pg_wchar c);
extern bool pg_u_prop_hex_digit(pg_wchar c);
extern bool pg_u_prop_join_control(pg_wchar c);
extern bool pg_u_isdigit(pg_wchar c, bool posix);
extern bool pg_u_isalpha(pg_wchar c);
extern bool pg_u_isalnum(pg_wchar c, bool posix);
extern bool pg_u_isword(pg_wchar c);
extern bool pg_u_isupper(pg_wchar c);
extern bool pg_u_islower(pg_wchar c);
extern bool pg_u_isblank(pg_wchar c);
extern bool pg_u_iscntrl(pg_wchar c);
extern bool pg_u_isgraph(pg_wchar c);
extern bool pg_u_isprint(pg_wchar c);
extern bool pg_u_ispunct(pg_wchar c, bool posix);
extern bool pg_u_isspace(pg_wchar c);
extern bool pg_u_isxdigit(pg_wchar c, bool posix);
#endif /* UNICODE_CATEGORY_H */
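To make the 'posix' parameters above concrete, here is a minimal sketch (not part of the commit) comparing the Standard and POSIX Compatible variants from UTS #18 on two code points whose classification differs between the variants; the build setup (frontend code linked with the common library, as for category_test) is assumed.

#include "postgres_fe.h"

#include <assert.h>

#include "common/unicode_category.h"

int
main(void)
{
	pg_wchar	arabic_zero = 0x0660;	/* ARABIC-INDIC DIGIT ZERO, category Nd */

	/* Standard variant: any Decimal_Number (Nd) code point counts as a digit */
	assert(pg_u_isdigit(arabic_zero, false));

	/* POSIX Compatible variant: only ASCII '0'..'9' qualify */
	assert(!pg_u_isdigit(arabic_zero, true));

	/* Hex_Digit includes the fullwidth forms under the Standard variant */
	assert(pg_u_isxdigit(0xFF21, false));	/* FULLWIDTH LATIN CAPITAL LETTER A */
	assert(!pg_u_isxdigit(0xFF21, true));

	return 0;
}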

File diff suppressed because it is too large.