class Sequel::Postgres::Dataset
Constants
- BindArgumentMethods
- PREPARED_ARG_PLACEHOLDER
- PreparedStatementMethods
Public Instance Methods
  # File lib/sequel/adapters/postgres.rb, line 694
  def bound_variable_modules
    [BindArgumentMethods]
  end
  # File lib/sequel/adapters/postgres.rb, line 608
  def fetch_rows(sql)
    return cursor_fetch_rows(sql){|h| yield h} if @opts[:cursor]
    execute(sql){|res| yield_hash_rows(res, fetch_rows_set_cols(res)){|h| yield h}}
  end
Use a cursor for paging.
  # File lib/sequel/adapters/postgres.rb, line 614
  def paged_each(opts=OPTS, &block)
    unless block_given?
      return enum_for(:paged_each, opts)
    end
    use_cursor(opts).each(&block)
  end
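A minimal usage sketch (:huge_table stands in for any large table; options are passed through to use_cursor, so the use_cursor options documented below apply):

  # Stream a large table without loading the whole result set into memory:
  DB[:huge_table].paged_each{|row| p row}

  # Without a block, an Enumerator is returned:
  DB[:huge_table].paged_each(rows_per_fetch: 100).each{|row| p row}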
PostgreSQL uses $N for placeholders instead of ?, so use a $ as the placeholder.
  # File lib/sequel/adapters/postgres.rb, line 704
  def prepared_arg_placeholder
    PREPARED_ARG_PLACEHOLDER
  end
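To see the placeholder in action, here is a minimal prepared statement sketch (:items is a hypothetical table); the :$n bound variable below is literalized as $1 when the statement is prepared:

  ps = DB[:items].where(id: :$n).prepare(:select, :select_by_id)
  ps.call(n: 1)  # executes e.g. SELECT * FROM "items" WHERE ("id" = $1)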
  # File lib/sequel/adapters/postgres.rb, line 698
  def prepared_statement_modules
    [PreparedStatementMethods]
  end
Uses a cursor for fetching records, instead of fetching the entire result set at once. By default the cursor usage is wrapped in a transaction; this can be avoided by passing hold: true as described below. Cursors can be used to process large datasets without holding all rows in memory (which is what the underlying drivers may do by default). Options:
- :cursor_name : The name assigned to the cursor (default 'sequel_cursor'). Nested cursors require different names.
- :hold : Declare the cursor WITH HOLD and don't use a transaction around the cursor usage.
- :rows_per_fetch : The number of rows per fetch (default 1000). Higher numbers result in fewer queries but greater memory use.
Usage:
  DB[:huge_table].use_cursor.each{|row| p row}
  DB[:huge_table].use_cursor(rows_per_fetch: 10000).each{|row| p row}
  DB[:huge_table].use_cursor(cursor_name: 'my_cursor').each{|row| p row}
This is untested with the prepared statement/bound variable support, and unlikely to work with either.
  # File lib/sequel/adapters/postgres.rb, line 643
  def use_cursor(opts=OPTS)
    clone(:cursor=>{:rows_per_fetch=>1000}.merge!(opts))
  end
Replace the WHERE clause with one that uses CURRENT OF with the given cursor name (or the default cursor name). This allows you to update a large dataset by updating individual rows while processing the dataset via a cursor:
  DB[:huge_table].use_cursor(rows_per_fetch: 1).each do |row|
    DB[:huge_table].where_current_of.update(column: ruby_method(row))
  end
  # File lib/sequel/adapters/postgres.rb, line 655
  def where_current_of(cursor_name='sequel_cursor')
    clone(:where=>Sequel.lit(['CURRENT OF '], Sequel.identifier(cursor_name)))
  end
Private Instance Methods
Generate and execute a procedure call.
  # File lib/sequel/adapters/postgres.rb, line 712
  def call_procedure(name, args)
    sql = String.new
    sql << "CALL "
    identifier_append(sql, name)
    literal_append(sql, args)
    with_sql_first(sql)
  end
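This private method backs Database#call_procedure. A minimal sketch (set_price is a hypothetical stored procedure):

  # Issues: CALL "set_price"(1, 12.5)
  DB.call_procedure(:set_price, 1, 12.5)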
Use a cursor to fetch groups of records at a time, yielding them to the block.
  # File lib/sequel/adapters/postgres.rb, line 721
  def cursor_fetch_rows(sql)
    server_opts = {:server=>@opts[:server] || :read_only}
    cursor = @opts[:cursor]
    hold = cursor[:hold]
    cursor_name = quote_identifier(cursor[:cursor_name] || 'sequel_cursor')
    rows_per_fetch = cursor[:rows_per_fetch].to_i

    db.public_send(*(hold ? [:synchronize, server_opts[:server]] : [:transaction, server_opts])) do
      begin
        execute_ddl("DECLARE #{cursor_name} NO SCROLL CURSOR WITH#{'OUT' unless hold} HOLD FOR #{sql}", server_opts)
        rows_per_fetch = 1000 if rows_per_fetch <= 0
        fetch_sql = "FETCH FORWARD #{rows_per_fetch} FROM #{cursor_name}"

        # Load columns only in the first fetch, so subsequent fetches are faster
        cols = nil
        execute(fetch_sql) do |res|
          cols = fetch_rows_set_cols(res)
          yield_hash_rows(res, cols){|h| yield h}
          return if res.ntuples < rows_per_fetch
        end

        while true
          execute(fetch_sql) do |res|
            yield_hash_rows(res, cols){|h| yield h}
            return if res.ntuples < rows_per_fetch
          end
        end
      rescue Exception => e
        raise
      ensure
        begin
          execute_ddl("CLOSE #{cursor_name}", server_opts)
        rescue
          raise e if e
          raise
        end
      end
    end
  end
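To make the control flow concrete, the SQL issued for DB[:huge_table].use_cursor.each with the defaults (no :hold, so WITHOUT HOLD inside a transaction) looks roughly like this, reconstructed from the method body above:

  # DECLARE "sequel_cursor" NO SCROLL CURSOR WITHOUT HOLD FOR SELECT * FROM "huge_table"
  # FETCH FORWARD 1000 FROM "sequel_cursor"  -- repeated until a fetch returns fewer than 1000 rows
  # CLOSE "sequel_cursor"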
Set the columns based on the result set, and return the column information as an array of [type conversion proc, column name symbol] pairs, one per field.
  # File lib/sequel/adapters/postgres.rb, line 761
  def fetch_rows_set_cols(res)
    cols = []
    procs = db.conversion_procs
    res.nfields.times do |fieldnum|
      cols << [procs[res.ftype(fieldnum)], output_identifier(res.fname(fieldnum))]
    end
    self.columns = cols.map{|c| c[1]}
    cols
  end
Use the driver's escape_bytea.
  # File lib/sequel/adapters/postgres.rb, line 772
  def literal_blob_append(sql, v)
    sql << "'" << db.synchronize(@opts[:server]){|c| c.escape_bytea(v)} << "'"
  end
Use the driver's escape_string.
  # File lib/sequel/adapters/postgres.rb, line 777
  def literal_string_append(sql, v)
    sql << "'" << db.synchronize(@opts[:server]){|c| c.escape_string(v)} << "'"
  end
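These two hooks are what literalization ultimately uses for strings and blobs. A minimal sketch (the exact escaped output depends on server settings such as standard_conforming_strings and bytea_output):

  DB.literal("O'Reilly")           # e.g. "'O''Reilly'"
  DB.literal(Sequel.blob("\x00"))  # e.g. "'\\x00'" (hex bytea format)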
For each row in the result set, yield a hash with column name symbol keys and typecasted values.
  # File lib/sequel/adapters/postgres.rb, line 783
  def yield_hash_rows(res, cols)
    ntuples = res.ntuples
    recnum = 0
    while recnum < ntuples
      fieldnum = 0
      nfields = cols.length
      converted_rec = {}
      while fieldnum < nfields
        type_proc, fieldsym = cols[fieldnum]
        value = res.getvalue(recnum, fieldnum)
        converted_rec[fieldsym] = (value && type_proc) ? type_proc.call(value) : value
        fieldnum += 1
      end
      yield converted_rec
      recnum += 1
    end
  end
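For example, a row from a table with an integer id column is yielded as {:id=>1}: the driver returns the string "1" and the integer conversion proc from cols converts it. NULL values arrive as nil and skip conversion, since the proc is only called when the value is non-nil.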