feat: Custom-metadata, exists, info methods #1023

Draft
wants to merge 4 commits into base: main
20 changes: 19 additions & 1 deletion infra/storage_client/docker-compose.yml
@@ -38,10 +38,14 @@ services:
FILE_STORAGE_BACKEND_PATH: /tmp/storage
ENABLE_IMAGE_TRANSFORMATION: "true"
IMGPROXY_URL: http://imgproxy:8080
DEBUG: "knex:*"

volumes:
- assets-volume:/tmp/storage
healthcheck:
test: ['CMD-SHELL', 'curl -f -LI http://localhost:5000/status']
interval: 2s

db:
build:
context: ./postgres
@@ -62,6 +66,20 @@ services:
timeout: 5s
retries: 5

dummy_data:
build:
context: ./postgres
depends_on:
storage:
condition: service_healthy
volumes:
- ./postgres:/sql
command:
- psql
- "postgresql://postgres:postgres@db:5432/postgres"
- -f
- /sql/dummy-data.sql

imgproxy:
image: darthsim/imgproxy
ports:
@@ -73,4 +91,4 @@
- IMGPROXY_USE_ETAG=true
- IMGPROXY_ENABLE_WEBP_DETECTION=true
volumes:
assets-volume:
assets-volume:
3 changes: 1 addition & 2 deletions infra/storage_client/postgres/Dockerfile
@@ -3,7 +3,6 @@ FROM supabase/postgres:0.13.0
COPY 00-initial-schema.sql /docker-entrypoint-initdb.d/00-initial-schema.sql
COPY auth-schema.sql /docker-entrypoint-initdb.d/01-auth-schema.sql
COPY storage-schema.sql /docker-entrypoint-initdb.d/02-storage-schema.sql
COPY dummy-data.sql /docker-entrypoint-initdb.d/03-dummy-data.sql

# Build time defaults
ARG build_POSTGRES_DB=postgres
@@ -17,4 +16,4 @@ ENV POSTGRES_USER=$build_POSTGRES_USER
ENV POSTGRES_PASSWORD=$build_POSTGRES_PASSWORD
ENV POSTGRES_PORT=$build_POSTGRES_PORT

EXPOSE 5432
EXPOSE 5432
18 changes: 7 additions & 11 deletions infra/storage_client/postgres/storage-schema.sql
@@ -28,7 +28,6 @@ CREATE TABLE "storage"."objects" (
"last_accessed_at" timestamptz DEFAULT now(),
"metadata" jsonb,
CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"),
CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"),
PRIMARY KEY ("id")
);
CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING BTREE ("bucket_id","name");
@@ -85,27 +84,24 @@ CREATE OR REPLACE FUNCTION storage.search(prefix text, bucketname text, limits i
)
LANGUAGE plpgsql
AS $function$
DECLARE
_bucketId text;
BEGIN
select buckets."id" from buckets where buckets.name=bucketname limit 1 into _bucketId;
return query
return query
with files_folders as (
select ((string_to_array(objects.name, '/'))[levels]) as folder
from objects
where objects.name ilike prefix || '%'
and bucket_id = _bucketId
and bucket_id = bucketname
GROUP by folder
limit limits
offset offsets
)
select files_folders.folder as name, objects.id, objects.updated_at, objects.created_at, objects.last_accessed_at, objects.metadata from files_folders
)
select files_folders.folder as name, objects.id, objects.updated_at, objects.created_at, objects.last_accessed_at, objects.metadata from files_folders
left join objects
on prefix || files_folders.folder = objects.name
where objects.id is null or objects.bucket_id=_bucketId;
on prefix || files_folders.folder = objects.name and objects.bucket_id=bucketname;
END
$function$;

GRANT ALL PRIVILEGES ON SCHEMA storage TO postgres;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA storage TO postgres;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO postgres;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO postgres;

4 changes: 2 additions & 2 deletions infra/storage_client/storage/Dockerfile
@@ -1,3 +1,3 @@
FROM supabase/storage-api:v0.35.1
FROM supabase/storage-api:v1.8.2

RUN apk add curl --no-cache
RUN apk add curl --no-cache
87 changes: 59 additions & 28 deletions packages/storage_client/lib/src/fetch.dart
@@ -25,8 +25,18 @@ class Fetch {
return MediaType.parse(mime ?? 'application/octet-stream');
}

StorageException _handleError(dynamic error, StackTrace stack) {
StorageException _handleError(
dynamic error,
StackTrace stack,
FetchOptions? options,
) {
if (error is http.Response) {
if (options?.noResolveJson == true) {
return StorageException(
error.body.isEmpty ? error.reasonPhrase ?? '' : error.body,
statusCode: '${error.statusCode}',
);
}
try {
final data = json.decode(error.body) as Map<String, dynamic>;
return StorageException.fromJson(data, '${error.statusCode}');
@@ -70,7 +80,7 @@ class Fetch {
return _handleResponse(streamedResponse, options);
}

Future<dynamic> _handleMultipartRequest(
Future<dynamic> _handleFileRequest(
String method,
String url,
File file,
@@ -79,7 +89,6 @@
int retryAttempts,
StorageRetryController? retryController,
) async {
final headers = options?.headers ?? {};
final contentType = fileOptions.contentType != null
? MediaType.parse(fileOptions.contentType!)
: _parseMediaType(file.path);
@@ -89,28 +98,15 @@
filename: file.path,
contentType: contentType,
);
final request = http.MultipartRequest(method, Uri.parse(url))
..headers.addAll(headers)
..files.add(multipartFile)
..fields['cacheControl'] = fileOptions.cacheControl
..headers['x-upsert'] = fileOptions.upsert.toString();

final http.StreamedResponse streamedResponse;
final r = RetryOptions(maxAttempts: (retryAttempts + 1));
streamedResponse = await r.retry<http.StreamedResponse>(
() async {
if (httpClient != null) {
return httpClient!.send(request);
} else {
return request.send();
}
},
retryIf: (error) =>
retryController?.cancelled != true &&
(error is ClientException || error is TimeoutException),
return _handleMultipartRequest(
method,
url,
multipartFile,
fileOptions,
options,
retryAttempts,
retryController,
);

return _handleResponse(streamedResponse, options);
}

Future<dynamic> _handleBinaryFileRequest(
@@ -122,7 +118,6 @@
int retryAttempts,
StorageRetryController? retryController,
) async {
final headers = options?.headers ?? {};
final contentType = fileOptions.contentType != null
? MediaType.parse(fileOptions.contentType!)
: _parseMediaType(url);
@@ -133,11 +128,38 @@
filename: '',
contentType: contentType,
);
return _handleMultipartRequest(
method,
url,
multipartFile,
fileOptions,
options,
retryAttempts,
retryController,
);
}

Future<dynamic> _handleMultipartRequest(
String method,
String url,
MultipartFile multipartFile,
FileOptions fileOptions,
FetchOptions? options,
int retryAttempts,
StorageRetryController? retryController,
) async {
final headers = options?.headers ?? {};
final request = http.MultipartRequest(method, Uri.parse(url))
..headers.addAll(headers)
..files.add(multipartFile)
..fields['cacheControl'] = fileOptions.cacheControl
..headers['x-upsert'] = fileOptions.upsert.toString();
if (fileOptions.metadata != null) {
request.fields['metadata'] = json.encode(fileOptions.metadata);
}
if (fileOptions.headers != null) {
request.headers.addAll(fileOptions.headers!);
}

final http.StreamedResponse streamedResponse;
final r = RetryOptions(maxAttempts: (retryAttempts + 1));
@@ -170,10 +192,19 @@
return jsonBody;
}
} else {
throw _handleError(response, StackTrace.current);
throw _handleError(response, StackTrace.current, options);
}
}

Future<dynamic> head(String url, {FetchOptions? options}) async {
return _handleRequest(
'HEAD',
url,
null,
FetchOptions(headers: options?.headers, noResolveJson: true),
);
}

Future<dynamic> get(String url, {FetchOptions? options}) async {
return _handleRequest('GET', url, null, options);
}
@@ -210,7 +241,7 @@
required int retryAttempts,
required StorageRetryController? retryController,
}) async {
return _handleMultipartRequest('POST', url, file, fileOptions, options,
return _handleFileRequest('POST', url, file, fileOptions, options,
retryAttempts, retryController);
}

@@ -222,7 +253,7 @@
required int retryAttempts,
required StorageRetryController? retryController,
}) async {
return _handleMultipartRequest(
return _handleFileRequest(
'PUT',
url,
file,
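The refactor above makes both the `File`-based and the binary upload paths build a `MultipartFile` and hand it to one shared `_handleMultipartRequest`, which is where the new `metadata` field (JSON-encoded) and the per-file `headers` are attached. A minimal sketch of that pattern, using only `package:http`; `sendMultipart` is a hypothetical name, not part of this package:

```dart
import 'dart:convert';

import 'package:http/http.dart' as http;

/// Simplified sketch (not the package source): every upload path builds a
/// MultipartFile and funnels it through one multipart sender, which attaches
/// the optional metadata and extra headers.
Future<http.StreamedResponse> sendMultipart(
  String method,
  Uri url,
  http.MultipartFile file, {
  Map<String, dynamic>? metadata,
  Map<String, String>? extraHeaders,
}) {
  final request = http.MultipartRequest(method, url)..files.add(file);
  if (metadata != null) {
    // Custom metadata travels as a JSON-encoded form field.
    request.fields['metadata'] = json.encode(metadata);
  }
  if (extraHeaders != null) {
    // Per-file headers are merged into the request headers.
    request.headers.addAll(extraHeaders);
  }
  return request.send();
}
```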
30 changes: 30 additions & 0 deletions packages/storage_client/lib/src/storage_file_api.dart
@@ -397,6 +397,36 @@ class StorageFileApi {
return response as Uint8List;
}

/// Retrieves the details of an existing file
Future<FileObjectV2> info(String path) async {
final finalPath = _getFinalPath(path);
final options = FetchOptions(headers: headers);
final response = await _storageFetch.get(
'$url/object/info/$finalPath',
options: options,
);
final fileObjects = FileObjectV2.fromJson(response);
return fileObjects;
}

/// Checks the existence of a file
Future<bool> exists(String path) async {
final finalPath = _getFinalPath(path);
final options = FetchOptions(headers: headers);
try {
await _storageFetch.head(
'$url/object/$finalPath',
options: options,
);
return true;
} on StorageException catch (e) {
if (e.statusCode == '400' || e.statusCode == '404') {
return false;
}
rethrow;
}
}

/// Retrieve URLs for assets in public buckets
///
/// [path] is the file path to be downloaded, including the current file name.
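A hedged usage sketch of the new `info` and `exists` methods, assuming the package's usual `SupabaseStorageClient` entry point; the bucket name and object path are placeholders:

```dart
import 'package:storage_client/storage_client.dart';

Future<void> inspectObject(SupabaseStorageClient storage) async {
  final files = storage.from('avatars'); // hypothetical bucket name

  // `info` returns the new FileObjectV2, including any custom metadata.
  final details = await files.info('public/avatar1.png');
  print('${details.contentType} ${details.size} ${details.metadata}');

  // `exists` resolves to false on 400/404 instead of throwing.
  if (await files.exists('public/avatar1.png')) {
    // The object is present; safe to download.
  }
}
```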
57 changes: 57 additions & 0 deletions packages/storage_client/lib/src/types.dart
@@ -75,6 +75,53 @@ class FileObject {
json['buckets'] != null ? Bucket.fromJson(json['buckets']) : null;
}

class FileObjectV2 {
final String id;
final String version;
final String name;
final String? bucketId;
final String? updatedAt;
final String createdAt;
final String? lastAccessedAt;
final int? size;
final String? cacheControl;
final String? contentType;
final String? etag;
final String? lastModified;
final Map<String, dynamic>? metadata;

const FileObjectV2({
required this.id,
required this.version,
required this.name,
required this.bucketId,
required this.updatedAt,
required this.createdAt,
required this.lastAccessedAt,
required this.size,
required this.cacheControl,
required this.contentType,
required this.etag,
required this.lastModified,
required this.metadata,
});

FileObjectV2.fromJson(Map<String, dynamic> json)
: id = json['id'] as String,
version = json['version'] as String,
name = json['name'] as String,
bucketId = json['bucket_id'] as String?,
updatedAt = json['updated_at'] as String?,
createdAt = json['created_at'] as String,
lastAccessedAt = json['last_accessed_at'] as String?,
size = json['size'] as int?,
cacheControl = json['cache_control'] as String?,
contentType = json['content_type'] as String?,
etag = json['etag'] as String?,
lastModified = json['last_modified'] as String?,
metadata = json['metadata'] as Map<String, dynamic>?;
}

/// [public] The visibility of the bucket. Public buckets don't require an
/// authorization token to download objects, but still require a valid token for
/// all other operations. By default, buckets are private.
@@ -115,10 +162,20 @@ class FileOptions {
/// Throws a FormatError if the media type is invalid.
final String? contentType;

/// The metadata option is an object that allows you to store additional
/// information about the file. This information can be used to filter and
/// search for files.
final Map<String, dynamic>? metadata;

/// Optionally add extra headers.
final Map<String, String>? headers;

const FileOptions({
this.cacheControl = '3600',
this.upsert = false,
this.contentType,
this.metadata,
this.headers,
});
}

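And a sketch of passing the new `FileOptions` fields on upload, under the same assumptions; the metadata keys and header values are placeholders:

```dart
import 'dart:io';

import 'package:storage_client/storage_client.dart';

Future<void> uploadWithMetadata(SupabaseStorageClient storage) async {
  // Bucket, paths, metadata keys and header values are placeholders.
  await storage.from('avatars').upload(
        'public/avatar1.png',
        File('local/avatar1.png'),
        fileOptions: const FileOptions(
          contentType: 'image/png',
          metadata: {'owner': 'user-123', 'source': 'mobile'},
          headers: {'x-trace-id': 'upload-debug-1'},
        ),
      );
}
```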