Use int32_t instead of plain int with Unicode code points
On some architectures, int just isn't big enough to hold all Unicode code points.
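A minimal standalone sketch (not part of the commit) illustrating that motivation: Unicode code points run up to U+10FFFF, which needs 21 bits, while C only guarantees that a plain int has at least 16 bits; int32_t from <stdint.h> is always 32 bits wide. The program below only uses standard C and simply reports whether the largest code point would fit in this platform's int.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t max_codepoint = 0x10FFFF;   /* largest valid Unicode code point */

    /* INT_MAX may be as small as 32767 (a 16-bit int), in which case
       0x10FFFF (1114111) does not fit in a plain int. */
    printf("INT_MAX        = %d\n", INT_MAX);
    printf("max code point = %ld\n", (long)max_codepoint);
    printf("fits in int:     %s\n",
           (long)INT_MAX >= (long)max_codepoint ? "yes" : "no");
    return 0;
}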
@@ -6,8 +6,9 @@
  */
 
 #include <string.h>
+#include <stdint.h>
 
-int utf8_encode(int codepoint, char *buffer, int *size)
+int utf8_encode(int32_t codepoint, char *buffer, int *size)
 {
     if(codepoint < 0)
         return -1;
@@ -81,7 +82,8 @@ int utf8_check_first(char byte)
 
 int utf8_check_full(const char *buffer, int size)
 {
-    int i, value = 0;
+    int i;
+    int32_t value = 0;
     unsigned char u = (unsigned char)buffer[0];
 
     if(size == 2)
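With the new signature, callers can pass code points above U+FFFF even on platforms where int is only 16 bits. A minimal usage sketch follows; the declaration is copied from the patched signature, while the assumptions that utf8_encode returns 0 on success and writes the number of encoded bytes to *size are inferred from the diff (the error path returns -1), not stated in it. The 4-byte buffer reflects the maximum length of a UTF-8 sequence, and U+1F600 is just an arbitrary example above U+FFFF.

#include <stdint.h>
#include <stdio.h>

/* Declaration matching the patched signature; the definition lives in the
   library's UTF-8 module, so this sketch must be linked against it. */
int utf8_encode(int32_t codepoint, char *buffer, int *size);

int main(void)
{
    char buffer[4];               /* a UTF-8 sequence is at most 4 bytes */
    int size = 0;
    int32_t codepoint = 0x1F600;  /* example code point above U+FFFF */

    /* Assumed: 0 means success and *size holds the encoded length. */
    if(utf8_encode(codepoint, buffer, &size) == 0) {
        int i;
        printf("encoded in %d bytes:", size);
        for(i = 0; i < size; i++)
            printf(" %02X", (unsigned char)buffer[i]);
        printf("\n");
    }
    return 0;
}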